diff --git a/.gitignore b/.gitignore
index de30c26c6..bc8cfd9e5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@ bin
 *.tgz
 tags.json
 .idea
+.vscode/settings.json
diff --git a/cmd/kube-mgmt/main.go b/cmd/kube-mgmt/main.go
index 80b119b31..2f3482c5a 100644
--- a/cmd/kube-mgmt/main.go
+++ b/cmd/kube-mgmt/main.go
@@ -9,7 +9,6 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"path"
@@ -141,7 +140,7 @@ func run(params *params) {
 	}

 	if params.opaAuthFile != "" {
-		file, err := ioutil.ReadFile(params.opaAuthFile)
+		file, err := os.ReadFile(params.opaAuthFile)
 		if err != nil {
 			logrus.Fatalf("Failed to read opa auth token file %s", params.opaAuthFile)
 		}
@@ -162,7 +161,7 @@ func run(params *params) {
 		if rootCAs == nil {
 			rootCAs = x509.NewCertPool()
 		}
-		certs, err := ioutil.ReadFile(params.opaCAFile)
+		certs, err := os.ReadFile(params.opaCAFile)
 		if err != nil {
 			logrus.Fatalf("Failed to read opa certificate authority file %s", params.opaCAFile)
 		}
diff --git a/go.mod b/go.mod
index 083618f21..5fb9f0844 100644
--- a/go.mod
+++ b/go.mod
@@ -1,48 +1,58 @@
 module github.com/open-policy-agent/kube-mgmt

-go 1.18
+go 1.23

 require (
 	github.com/sirupsen/logrus v1.8.3
 	github.com/spf13/cobra v1.3.0
-	k8s.io/api v0.23.17
-	k8s.io/apimachinery v0.23.17
-	k8s.io/client-go v0.23.17
+	k8s.io/api v0.31.3
+	k8s.io/apimachinery v0.31.3
+	k8s.io/client-go v0.31.3
 )

-require gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+require (
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+)

 require (
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/go-logr/logr v1.2.0 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect; indire4ct
-	github.com/google/go-cmp v0.5.6 // indirect
-	github.com/google/gofuzz v1.1.0 // indirect
-	github.com/googleapis/gnostic v0.5.5 // indirect
-	github.com/imdario/mergo v0.3.5 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect; indire4ct
+	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/imdario/mergo v0.3.6 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/net v0.23.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
+	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
 	golang.org/x/sys v0.26.0 // indirect
 	golang.org/x/term v0.25.0 // indirect
 	golang.org/x/text v0.19.0 // indirect
-	golang.org/x/time
v0.0.0-20220210224613-90d013bbcef8 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.30.0 // indirect - k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 8fa62f459..1be2d5885 100644 --- a/go.sum +++ b/go.sum @@ -49,10 +49,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -63,7 +60,6 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -92,11 +88,13 @@ github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWH github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -108,14 +106,12 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -124,15 +120,18 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0 
h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -167,11 +166,14 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -183,11 +185,13 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -206,17 +210,16 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= @@ -251,10 +254,12 @@ github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpT github.com/iancoleman/strcase v0.2.0/go.mod 
h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -270,13 +275,17 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -305,14 +314,13 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -321,8 +329,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -339,6 +348,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= @@ -348,7 +359,6 @@ 
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.8.3 h1:DBBfY8eMYazKEJHb3JKpSPfpgd2mBCoNFlQx6C5fftU= github.com/sirupsen/logrus v1.8.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -358,18 +368,25 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -450,7 +467,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -477,8 +493,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -495,8 +511,9 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -593,8 +610,8 @@ golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -630,7 +647,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -650,10 +666,11 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -693,7 +710,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -726,7 +742,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -800,30 +815,29 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -834,29 +848,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.17 h1:gC11V5AIsNXUUa/xd5RQo7djukvl5O1ZDQKwEYu0H7g= -k8s.io/api v0.23.17/go.mod h1:upM9VIzXUjEyLTmGGi0KnH8kdlPnvgv+fEJ3tggDHfE= -k8s.io/apimachinery v0.23.17 h1:ipJ0SrpI6EzH8zVw0WhCBldgJhzIamiYIumSGTdFExY= -k8s.io/apimachinery v0.23.17/go.mod h1:87v5Wl9qpHbnapX1PSNgln4oO3dlyjAU3NSIwNhT4Lo= -k8s.io/client-go v0.23.17 h1:MbW05RO5sy+TFw2ds36SDdNSkJbr8DFVaaVrClSA8Vs= -k8s.io/client-go v0.23.17/go.mod h1:X5yz7nbJHS7q8977AKn8BWKgxeAXjl1sFsgstczUsCM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 
h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/pkg/configmap/configmap.go b/pkg/configmap/configmap.go
index 6ea1cc8ab..8d00aec04 100644
--- a/pkg/configmap/configmap.go
+++ b/pkg/configmap/configmap.go
@@ -140,20 +140,21 @@ func (s *Sync) Run(namespaces []string) (chan struct{}, error) {
 		if namespace == "*" {
 			namespace = v1.NamespaceAll
 		}
-		source := cache.NewListWatchFromClient(
+		listerWatcher := cache.NewListWatchFromClient(
 			client,
 			"configmaps",
 			namespace,
 			fields.Everything())
-		_, controller := cache.NewInformer(
-			source,
-			&v1.ConfigMap{},
-			0,
-			cache.ResourceEventHandlerFuncs{
+		_, controller := cache.NewInformerWithOptions(cache.InformerOptions{
+			ListerWatcher: listerWatcher,
+			ObjectType:    &v1.ConfigMap{},
+			Handler: cache.ResourceEventHandlerFuncs{
 				AddFunc:    s.add,
 				UpdateFunc: s.update,
 				DeleteFunc: s.delete,
-			})
+			},
+			ResyncPeriod: 0, // Set to 0 as in the original code
+		})
 		go controller.Run(quit)
 	}
 	return quit, nil
diff --git a/pkg/data/generic.go b/pkg/data/generic.go
index f649ff08d..b2221253c 100644
--- a/pkg/data/generic.go
+++ b/pkg/data/generic.go
@@ -41,7 +41,7 @@ type GenericSync struct {
 	client           dynamicClient
 	opa              opa_client.Data
 	ns               types.ResourceType
-	limiter          workqueue.RateLimiter
+	limiter          workqueue.TypedRateLimiter[any]
 	jitterFactor     float64
 	ignoreNamespaces []string
 }
@@ -70,7 +70,7 @@ func NewFromInterface(client dynamic.Interface, opa opa_client.Data, ns types.Re
 		opt(s)
 	}
 	if s.limiter == nil { // Use default rateLimiter if not configured
-		s.limiter = workqueue.NewItemExponentialFailureRateLimiter(backoffMin, backoffMax)
+		s.limiter = workqueue.NewTypedItemExponentialFailureRateLimiter[any](backoffMin, backoffMax)
 	}
 	return s
 }
@@ -85,7 +85,7 @@ func WithIgnoreNamespaces(ignoreNamespaces []string) Option {
 // WithBackoff tunes the values of exponential backoff and jitter factor
 func WithBackoff(min, max time.Duration, jitterFactor float64) Option {
 	return func(s *GenericSync) {
-		s.limiter = workqueue.NewItemExponentialFailureRateLimiter(min, max)
+		s.limiter = workqueue.NewTypedItemExponentialFailureRateLimiter[any](min, max)
 		s.jitterFactor = jitterFactor
 	}
 }
@@ -128,13 +128,13 @@ func (s *GenericSync) RunContext(ctx context.Context) error {
 }

 // setup the store and queue for this GenericSync instance
-func (s *GenericSync) setup(ctx context.Context) (cache.Store, workqueue.DelayingInterface) {
+func (s *GenericSync) setup(ctx context.Context) (cache.Store, workqueue.TypedDelayingInterface[any]) {
 	ignoreNs := s.ignoreNs()
 	resource := s.client.ResourceFor(s.ns, metav1.NamespaceAll)
 	queue := workqueue.NewNamedDelayingQueue(s.ns.String())

-	store, controller := cache.NewInformer(
-		&cache.ListWatch{
+	store, controller := cache.NewInformerWithOptions(cache.InformerOptions{
+		ListerWatcher: &cache.ListWatch{
 			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
 				options.FieldSelector = ignoreNs
 				return resource.List(ctx, options)
@@ -144,10 +144,10 @@ func (s *GenericSync) setup(ctx context.Context) (cache.Store, workqueue.Delayin
 				return resource.Watch(ctx, options)
 			},
 		},
-		&unstructured.Unstructured{},
-		0,
-		resourceEventQueue{queue},
-	)
+		ObjectType:   &unstructured.Unstructured{},
+		Handler:      resourceEventQueue{queue},
+		ResyncPeriod: 0,
+	})

 	start, quit := time.Now(), ctx.Done()
 	go controller.Run(quit)
@@ -181,7 +181,7 @@ type resourceEventQueue struct {
 }

 // OnAdd implements ResourceHandler
-func (q resourceEventQueue) OnAdd(obj interface{}) {
+func (q resourceEventQueue) OnAdd(obj interface{}, isInInitialList bool) {
 	key, err := cache.MetaNamespaceKeyFunc(obj)
 	if err != nil {
 		logrus.Warnf("failed to retrieve key: %v", err)
@@ -210,7 +210,7 @@ func (q resourceEventQueue) resourceVersionMatch(oldObj, newObj interface{}) boo
 // OnUpdate implements ResourceHandler
 func (q resourceEventQueue) OnUpdate(oldObj, newObj interface{}) {
 	if !q.resourceVersionMatch(oldObj, newObj) { // Avoid sync flood on relist. We don't use resync.
-		q.OnAdd(newObj)
+		q.OnAdd(newObj, false)
 	}
 }

@@ -229,7 +229,7 @@ const initPath = ""
 // loop starts replicating Kubernetes resources into OPA. If an error occurs
 // during the replication process, this function will backoff and reload
 // all resources into OPA from scratch.
-func (s *GenericSync) loop(store cache.Store, queue workqueue.DelayingInterface) { +func (s *GenericSync) loop(store cache.Store, queue workqueue.TypedDelayingInterface[any]) { logrus.Infof("Syncing %v.", s.ns) defer func() { diff --git a/vendor/github.com/emicklei/go-restful/v3/.gitignore b/vendor/github.com/emicklei/go-restful/v3/.gitignore new file mode 100644 index 000000000..446be09b4 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/.gitignore @@ -0,0 +1,71 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +restful.html + +*.out + +tmp.prof + +go-restful.test + +examples/restful-basic-authentication + +examples/restful-encoding-filter + +examples/restful-filters + +examples/restful-hello-world + +examples/restful-resource-functions + +examples/restful-serve-static + +examples/restful-user-service + +*.DS_Store +examples/restful-user-resource + +examples/restful-multi-containers + +examples/restful-form-handling + +examples/restful-CORS-filter + +examples/restful-options-filter + +examples/restful-curly-router + +examples/restful-cpuprofiler-service + +examples/restful-pre-post-filters + +curly.prof + +examples/restful-NCSA-logging + +examples/restful-html-template + +s.html +restful-path-tail +.idea diff --git a/vendor/github.com/emicklei/go-restful/v3/.goconvey b/vendor/github.com/emicklei/go-restful/v3/.goconvey new file mode 100644 index 000000000..8485e986e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/.travis.yml b/vendor/github.com/emicklei/go-restful/v3/.travis.yml new file mode 100644 index 000000000..3a0bf5ff1 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.x + +before_install: + - go test -v + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md new file mode 100644 index 000000000..5edd5a7ca --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -0,0 +1,396 @@ +# Change history of go-restful + +## [v3.11.0] - 2023-08-19 + +- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. + +## [v3.10.2] - 2023-03-09 - DO NOT USE + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. 
+ +## [v3.10.1] - 2022-11-19 - DO NOT USE + +- fix broken 3.10.0 by using path package for joining paths + +## [v3.10.0] - 2022-10-11 - BROKEN + +- changed tokenizer to match std route match behavior; do not trimright the path (#511) +- Add MIME_ZIP (#512) +- Add MIME_ZIP and HEADER_ContentDisposition (#513) +- Changed how to get query parameter issue #510 + +## [v3.9.0] - 2022-07-21 + +- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) + +## [v3.8.0] - 2022-06-06 + +- use exact matching of allowed domain entries, issue #489 (#493) + - this changes fixes [security] Authorization Bypass Through User-Controlled Key + by changing the behaviour of the AllowedDomains setting in the CORS filter. + To support the previous behaviour, the CORS filter type now has a AllowedDomainFunc + callback mechanism which is called when a simple domain match fails. +- add test and fix for POST without body and Content-type, issue #492 (#496) +- [Minor] Bad practice to have a mix of Receiver types. (#491) + +## [v3.7.2] - 2021-11-24 + +- restored FilterChain (#482 by SVilgelm) + + +## [v3.7.1] - 2021-10-04 + +- fix problem with contentEncodingEnabled setting (#479) + +## [v3.7.0] - 2021-09-24 + +- feat(parameter): adds additional openapi mappings (#478) + +## [v3.6.0] - 2021-09-18 + +- add support for vendor extensions (#477 thx erraggy) + +## [v3.5.2] - 2021-07-14 + +- fix removing absent route from webservice (#472) + +## [v3.5.1] - 2021-04-12 + +- fix handling no match access selected path +- remove obsolete field + +## [v3.5.0] - 2021-04-10 + +- add check for wildcard (#463) in CORS +- add access to Route from Request, issue #459 (#462) + +## [v3.4.0] - 2020-11-10 + +- Added OPTIONS to WebService + +## [v3.3.2] - 2020-01-23 + +- Fixed duplicate compression in dispatch. #449 + + +## [v3.3.1] - 2020-08-31 + +- Added check on writer to prevent compression of response twice. #447 + +## [v3.3.0] - 2020-08-19 + +- Enable content encoding on Handle and ServeHTTP (#446) +- List available representations in 406 body (#437) +- Convert to string using rune() (#443) + +## [v3.2.0] - 2020-06-21 + +- 405 Method Not Allowed must have Allow header (#436) (thx Bracken ) +- add field allowedMethodsWithoutContentType (#424) + +## [v3.1.0] + +- support describing response headers (#426) +- fix openapi examples (#425) + +v3.0.0 + +- fix: use request/response resulting from filter chain +- add Go module + Module consumer should use github.com/emicklei/go-restful/v3 as import path + +v2.10.0 + +- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>) +- fixed static example (thanks Arthur ) +- simplify code (thanks Christian Muehlhaeuser ) +- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben ) + +v2.9.6 + +- small optimization in filter code + +v2.11.1 + +- fix WriteError return value (#415) + +v2.11.0 + +- allow prefix and suffix in path variable expression (#414) + +v2.9.6 + +- support google custome verb (#413) + +v2.9.5 + +- fix panic in Response.WriteError if err == nil + +v2.9.4 + +- fix issue #400 , parsing mime type quality +- Route Builder added option for contentEncodingEnabled (#398) + +v2.9.3 + +- Avoid return of 415 Unsupported Media Type when request body is empty (#396) + +v2.9.2 + +- Reduce allocations in per-request methods to improve performance (#395) + +v2.9.1 + +- Fix issue with default responses and invalid status code 0. 
(#393) + +v2.9.0 + +- add per Route content encoding setting (overrides container setting) + +v2.8.0 + +- add Request.QueryParameters() +- add json-iterator (via build tag) +- disable vgo module (until log is moved) + +v2.7.1 + +- add vgo module + +v2.6.1 + +- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) + +v2.6.0 + +- Make JSR 311 routing and path param processing consistent +- Adding description to RouteBuilder.Reads() +- Update example for Swagger12 and OpenAPI + +2017-09-13 + +- added route condition functions using `.If(func)` in route building. + +2017-02-16 + +- solved issue #304, make operation names unique + +2017-01-30 + + [IMPORTANT] For swagger users, change your import statement to: + swagger "github.com/emicklei/go-restful-swagger12" + +- moved swagger 1.2 code to go-restful-swagger12 +- created TAG 2.0.0 + +2017-01-27 + +- remove defer request body close +- expose Dispatch for testing filters and Routefunctions +- swagger response model cannot be array +- created TAG 1.0.0 + +2016-12-22 + +- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) + +2016-11-26 + +- Default change! now use CurlyRouter (was RouterJSR311) +- Default change! no more caching of request content +- Default change! do not recover from panics + +2016-09-22 + +- fix the DefaultRequestContentType feature + +2016-02-14 + +- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response +- add constructors for custom entity accessors for xml and json + +2015-09-27 + +- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency + +2015-09-25 + +- fixed problem with changing Header after WriteHeader (issue 235) + +2015-09-14 + +- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) +- added support for custom EntityReaderWriters. + +2015-08-06 + +- add support for reading entities from compressed request content +- use sync.Pool for compressors of http response and request body +- add Description to Parameter for documentation in Swagger UI + +2015-03-20 + +- add configurable logging + +2015-03-18 + +- if not specified, the Operation is derived from the Route function + +2015-03-17 + +- expose Parameter creation functions +- make trace logger an interface +- fix OPTIONSFilter +- customize rendering of ServiceError +- JSR311 router now handles wildcards +- add Notes to Route + +2014-11-27 + +- (api add) PrettyPrint per response. (as proposed in #167) + +2014-11-12 + +- (api add) ApiVersion(.) for documentation in Swagger UI + +2014-11-10 + +- (api change) struct fields tagged with "description" show up in Swagger UI + +2014-10-31 + +- (api change) ReturnsError -> Returns +- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder +- fix swagger nested structs +- sort Swagger response messages by code + +2014-10-23 + +- (api add) ReturnsError allows you to document Http codes in swagger +- fixed problem with greedy CurlyRouter +- (api add) Access-Control-Max-Age in CORS +- add tracing functionality (injectable) for debugging purposes +- support JSON parse 64bit int +- fix empty parameters for swagger +- WebServicesUrl is now optional for swagger +- fixed duplicate AccessControlAllowOrigin in CORS +- (api change) expose ServeMux in container +- (api add) added AllowedDomains in CORS +- (api add) ParameterNamed for detailed documentation + +2014-04-16 + +- (api add) expose constructor of Request for testing. 
+ +2014-06-27 + +- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). +- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). + +2014-07-03 + +- (api add) CORS can be configured with a list of allowed domains + +2014-03-12 + +- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) + +2014-02-26 + +- (api add) Request now provides information about the matched Route, see method SelectedRoutePath + +2014-02-17 + +- (api change) renamed parameter constants (go-lint checks) + +2014-01-10 + +- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier + +2014-01-07 + +- (api change) Write* methods in Response now return the error or nil. +- added example of serving HTML from a Go template. +- fixed comparing Allowed headers in CORS (is now case-insensitive) + +2013-11-13 + +- (api add) Response knows how many bytes are written to the response body. + +2013-10-29 + +- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. + +2013-10-04 + +- (api add) Response knows what HTTP status has been written +- (api add) Request can have attributes (map of string->interface, also called request-scoped variables + +2013-09-12 + +- (api change) Router interface simplified +- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths + +2013-08-05 + - add OPTIONS support + - add CORS support + +2013-08-27 + +- fixed some reported issues (see github) +- (api change) deprecated use of WriteError; use WriteErrorString instead + +2014-04-15 + +- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString + +2013-08-08 + +- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. +- (api add) the swagger package has be extended to have a UI per container. +- if panic is detected then a small stack trace is printed (thanks to runner-mei) +- (api add) WriteErrorString to Response + +Important API changes: + +- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. +- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. + + +2013-07-06 + +- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. + +2013-06-19 + +- (improve) DoNotRecover option, moved request body closer, improved ReadEntity + +2013-06-03 + +- (api change) removed Dispatcher interface, hide PathExpression +- changed receiver names of type functions to be more idiomatic Go + +2013-06-02 + +- (optimize) Cache the RegExp compilation of Paths. + +2013-05-22 + +- (api add) Added support for request/response filter functions + +2013-05-18 + + +- (api add) Added feature to change the default Http Request Dispatch function (travis cline) +- (api change) Moved Swagger Webservice to swagger package (see example restful-user) + +[2012-11-14 .. 
2013-05-18> + +- See https://github.com/emicklei/go-restful/commits + +2012-11-14 + +- Initial commit + + diff --git a/vendor/github.com/emicklei/go-restful/v3/LICENSE b/vendor/github.com/emicklei/go-restful/v3/LICENSE new file mode 100644 index 000000000..ece7ec61e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012,2013 Ernest Micklei + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/Makefile b/vendor/github.com/emicklei/go-restful/v3/Makefile new file mode 100644 index 000000000..16d0b80bb --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/Makefile @@ -0,0 +1,8 @@ +all: test + +test: + go vet . + go test -cover -v . + +ex: + find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {} \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md new file mode 100644 index 000000000..e3e30080e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -0,0 +1,112 @@ +go-restful +========== +package for building REST-style Web Services using Google Go + +[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) +[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) +[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) + +- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) + +REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: + +- GET = Retrieve a representation of a resource +- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. +- PUT = Create if you are sending the full content of the specified resource (URI). +- PUT = Update if you are updating the full content of the specified resource. 
+- DELETE = Delete if you are requesting the server to delete the resource +- PATCH = Update partial content of a resource +- OPTIONS = Get information about the communication options for the request URI + +### Usage + +#### Without Go Modules + +All versions up to `v2.*.*` (on the master) are not supporting Go modules. + +``` +import ( + restful "github.com/emicklei/go-restful" +) +``` + +#### Using Go Modules + +As of version `v3.0.0` (on the v3 branch), this package supports Go modules. + +``` +import ( + restful "github.com/emicklei/go-restful/v3" +) +``` + +### Example + +```Go +ws := new(restful.WebService) +ws. + Path("/users"). + Consumes(restful.MIME_XML, restful.MIME_JSON). + Produces(restful.MIME_JSON, restful.MIME_XML) + +ws.Route(ws.GET("/{user-id}").To(u.findUser). + Doc("get a user"). + Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). + Writes(User{})) +... + +func (u UserResource) findUser(request *restful.Request, response *restful.Response) { + id := request.PathParameter("user-id") + ... +} +``` + +[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go) + +### Features + +- Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support +- Configurable router: + - (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*}) + - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions +- Request API for reading structs from JSON/XML and accessing parameters (path,query,header) +- Response API for writing structs to JSON/XML and setting headers +- Customizable encoding using EntityReaderWriter registration +- Filters for intercepting the request → response flow on Service or Route level +- Request-scoped variables using attributes +- Containers for WebServices on different HTTP endpoints +- Content encoding (gzip,deflate) of request and response payloads +- Automatic responses on OPTIONS (using a filter) +- Automatic CORS request handling (using a filter) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) +- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) +- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) +- Configurable (trace) logging +- Customizable gzip/deflate readers and writers using CompressorProvider registration +- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function + +## How to customize +There are several hooks to customize the behavior of the go-restful package. + +- Router algorithm +- Panic recovery +- JSON decoder +- Trace logging +- Compression +- Encoders for other serializers +- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. 
`go build -tags=jsoniter .` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` + +## Resources + +- [Example programs](./examples) +- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) +- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) +- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) +- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia) +- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) + +Type ```git shortlog -s``` for a full list of contributors. + +© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/v3/SECURITY.md b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md new file mode 100644 index 000000000..810d3b510 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| v3.7.x | :white_check_mark: | +| < v3.0.1 | :x: | + +## Reporting a Vulnerability + +Create an Issue and put the label `[security]` in the title of the issue. +Valid reported security issues are expected to be solved within a week. diff --git a/vendor/github.com/emicklei/go-restful/v3/Srcfile b/vendor/github.com/emicklei/go-restful/v3/Srcfile new file mode 100644 index 000000000..16fd18689 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/Srcfile @@ -0,0 +1 @@ +{"SkipDirs": ["examples"]} diff --git a/vendor/github.com/emicklei/go-restful/v3/bench_test.sh b/vendor/github.com/emicklei/go-restful/v3/bench_test.sh new file mode 100644 index 000000000..47ffbe4ac --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/bench_test.sh @@ -0,0 +1,10 @@ +#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out + +go test -c +./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany +./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly + +#go tool pprof go-restful.test tmp.prof +go tool pprof go-restful.test curly.prof + + diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go new file mode 100644 index 000000000..1ff239f99 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -0,0 +1,127 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bufio" + "compress/gzip" + "compress/zlib" + "errors" + "io" + "net" + "net/http" + "strings" +) + +// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. 
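+//
+// A minimal sketch of the replacement call on a dedicated container
+// (the container variable name is a placeholder):
+//
+//    c := NewContainer()
+//    c.EnableContentEncoding(true)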
+var EnableContentEncoding = false + +// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) +type CompressingResponseWriter struct { + writer http.ResponseWriter + compressor io.WriteCloser + encoding string +} + +// Header is part of http.ResponseWriter interface +func (c *CompressingResponseWriter) Header() http.Header { + return c.writer.Header() +} + +// WriteHeader is part of http.ResponseWriter interface +func (c *CompressingResponseWriter) WriteHeader(status int) { + c.writer.WriteHeader(status) +} + +// Write is part of http.ResponseWriter interface +// It is passed through the compressor +func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { + if c.isCompressorClosed() { + return -1, errors.New("Compressing error: tried to write data using closed compressor") + } + return c.compressor.Write(bytes) +} + +// CloseNotify is part of http.CloseNotifier interface +func (c *CompressingResponseWriter) CloseNotify() <-chan bool { + return c.writer.(http.CloseNotifier).CloseNotify() +} + +// Close the underlying compressor +func (c *CompressingResponseWriter) Close() error { + if c.isCompressorClosed() { + return errors.New("Compressing error: tried to close already closed compressor") + } + + c.compressor.Close() + if ENCODING_GZIP == c.encoding { + currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) + } + if ENCODING_DEFLATE == c.encoding { + currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) + } + // gc hint needed? + c.compressor = nil + return nil +} + +func (c *CompressingResponseWriter) isCompressorClosed() bool { + return nil == c.compressor +} + +// Hijack implements the Hijacker interface +// This is especially useful when combining Container.EnabledContentEncoding +// in combination with websockets (for instance gorilla/websocket) +func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := c.writer.(http.Hijacker) + if !ok { + return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") + } + return hijacker.Hijack() +} + +// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. +// It also inspects the httpWriter whether its content-encoding is already set (non-empty). 
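+//
+// Behaviour sketch, derived from the checks below (header values are examples only):
+//
+//    Accept-Encoding: "gzip"          -> (true, ENCODING_GZIP)
+//    Accept-Encoding: "deflate, gzip" -> (true, ENCODING_DEFLATE)   (first in order of appearance wins)
+//    Content-Encoding already set on the writer -> (false, "")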
+func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) { + if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" { + return false, "" + } + header := httpRequest.Header.Get(HEADER_AcceptEncoding) + gi := strings.Index(header, ENCODING_GZIP) + zi := strings.Index(header, ENCODING_DEFLATE) + // use in order of appearance + if gi == -1 { + return zi != -1, ENCODING_DEFLATE + } else if zi == -1 { + return gi != -1, ENCODING_GZIP + } else { + if gi < zi { + return true, ENCODING_GZIP + } + return true, ENCODING_DEFLATE + } +} + +// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate} +func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { + httpWriter.Header().Set(HEADER_ContentEncoding, encoding) + c := new(CompressingResponseWriter) + c.writer = httpWriter + var err error + if ENCODING_GZIP == encoding { + w := currentCompressorProvider.AcquireGzipWriter() + w.Reset(httpWriter) + c.compressor = w + c.encoding = ENCODING_GZIP + } else if ENCODING_DEFLATE == encoding { + w := currentCompressorProvider.AcquireZlibWriter() + w.Reset(httpWriter) + c.compressor = w + c.encoding = ENCODING_DEFLATE + } else { + return nil, errors.New("Unknown encoding:" + encoding) + } + return c, err +} diff --git a/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go b/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go new file mode 100644 index 000000000..ee426010a --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go @@ -0,0 +1,103 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/gzip" + "compress/zlib" +) + +// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount +// of writers and readers (resources). +// If a new resource is acquired and all are in use, it will return a new unmanaged resource. +type BoundedCachedCompressors struct { + gzipWriters chan *gzip.Writer + gzipReaders chan *gzip.Reader + zlibWriters chan *zlib.Writer + writersCapacity int + readersCapacity int +} + +// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors. +func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { + b := &BoundedCachedCompressors{ + gzipWriters: make(chan *gzip.Writer, writersCapacity), + gzipReaders: make(chan *gzip.Reader, readersCapacity), + zlibWriters: make(chan *zlib.Writer, writersCapacity), + writersCapacity: writersCapacity, + readersCapacity: readersCapacity, + } + for ix := 0; ix < writersCapacity; ix++ { + b.gzipWriters <- newGzipWriter() + b.zlibWriters <- newZlibWriter() + } + for ix := 0; ix < readersCapacity; ix++ { + b.gzipReaders <- newGzipReader() + } + return b +} + +// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released. +func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { + var writer *gzip.Writer + select { + case writer, _ = <-b.gzipWriters: + default: + // return a new unmanaged one + writer = newGzipWriter() + } + return writer +} + +// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. 
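+//
+// Typical acquire/release pairing (a sketch; b is a *BoundedCachedCompressors
+// and dst is a placeholder io.Writer):
+//
+//    w := b.AcquireGzipWriter()
+//    defer b.ReleaseGzipWriter(w)
+//    w.Reset(dst)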
+func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { + // forget the unmanaged ones + if len(b.gzipWriters) < b.writersCapacity { + b.gzipWriters <- w + } +} + +// AcquireGzipReader returns a *gzip.Reader. Needs to be released. +func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { + var reader *gzip.Reader + select { + case reader, _ = <-b.gzipReaders: + default: + // return a new unmanaged one + reader = newGzipReader() + } + return reader +} + +// ReleaseGzipReader accepts a reader (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { + // forget the unmanaged ones + if len(b.gzipReaders) < b.readersCapacity { + b.gzipReaders <- r + } +} + +// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released. +func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { + var writer *zlib.Writer + select { + case writer, _ = <-b.zlibWriters: + default: + // return a new unmanaged one + writer = newZlibWriter() + } + return writer +} + +// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) +// only when the cache has room for it. It will ignore it otherwise. +func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { + // forget the unmanaged ones + if len(b.zlibWriters) < b.writersCapacity { + b.zlibWriters <- w + } +} diff --git a/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go b/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go new file mode 100644 index 000000000..d866ce64b --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go @@ -0,0 +1,91 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + "sync" +) + +// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool. +type SyncPoolCompessors struct { + GzipWriterPool *sync.Pool + GzipReaderPool *sync.Pool + ZlibWriterPool *sync.Pool +} + +// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors. 
+func NewSyncPoolCompessors() *SyncPoolCompessors { + return &SyncPoolCompessors{ + GzipWriterPool: &sync.Pool{ + New: func() interface{} { return newGzipWriter() }, + }, + GzipReaderPool: &sync.Pool{ + New: func() interface{} { return newGzipReader() }, + }, + ZlibWriterPool: &sync.Pool{ + New: func() interface{} { return newZlibWriter() }, + }, + } +} + +func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { + return s.GzipWriterPool.Get().(*gzip.Writer) +} + +func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { + s.GzipWriterPool.Put(w) +} + +func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { + return s.GzipReaderPool.Get().(*gzip.Reader) +} + +func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { + s.GzipReaderPool.Put(r) +} + +func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { + return s.ZlibWriterPool.Get().(*zlib.Writer) +} + +func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { + s.ZlibWriterPool.Put(w) +} + +func newGzipWriter() *gzip.Writer { + // create with an empty bytes writer; it will be replaced before using the gzipWriter + writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} + +func newGzipReader() *gzip.Reader { + // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader + // we can safely use currentCompressProvider because it is set on package initialization. + w := currentCompressorProvider.AcquireGzipWriter() + defer currentCompressorProvider.ReleaseGzipWriter(w) + b := new(bytes.Buffer) + w.Reset(b) + w.Flush() + w.Close() + reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) + if err != nil { + panic(err.Error()) + } + return reader +} + +func newZlibWriter() *zlib.Writer { + writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} diff --git a/vendor/github.com/emicklei/go-restful/v3/compressors.go b/vendor/github.com/emicklei/go-restful/v3/compressors.go new file mode 100644 index 000000000..9db4a8c8e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/compressors.go @@ -0,0 +1,54 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/gzip" + "compress/zlib" +) + +// CompressorProvider describes a component that can provider compressors for the std methods. +type CompressorProvider interface { + // Returns a *gzip.Writer which needs to be released later. + // Before using it, call Reset(). + AcquireGzipWriter() *gzip.Writer + + // Releases an acquired *gzip.Writer. + ReleaseGzipWriter(w *gzip.Writer) + + // Returns a *gzip.Reader which needs to be released later. + AcquireGzipReader() *gzip.Reader + + // Releases an acquired *gzip.Reader. + ReleaseGzipReader(w *gzip.Reader) + + // Returns a *zlib.Writer which needs to be released later. + // Before using it, call Reset(). + AcquireZlibWriter() *zlib.Writer + + // Releases an acquired *zlib.Writer. + ReleaseZlibWriter(w *zlib.Writer) +} + +// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip). +var currentCompressorProvider CompressorProvider + +func init() { + currentCompressorProvider = NewSyncPoolCompessors() +} + +// CurrentCompressorProvider returns the current CompressorProvider. +// It is initialized using a SyncPoolCompessors. 
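+//
+// A sketch of swapping in the bounded cache variant instead of the sync.Pool
+// default (the capacities 20, 20 are arbitrary examples):
+//
+//    SetCompressorProvider(NewBoundedCachedCompressors(20, 20))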
+func CurrentCompressorProvider() CompressorProvider { + return currentCompressorProvider +} + +// SetCompressorProvider sets the actual provider of compressors (zlib or gzip). +func SetCompressorProvider(p CompressorProvider) { + if p == nil { + panic("cannot set compressor provider to nil") + } + currentCompressorProvider = p +} diff --git a/vendor/github.com/emicklei/go-restful/v3/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go new file mode 100644 index 000000000..2328bde6c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/constants.go @@ -0,0 +1,32 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +const ( + MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default + + HEADER_Allow = "Allow" + HEADER_Accept = "Accept" + HEADER_Origin = "Origin" + HEADER_ContentType = "Content-Type" + HEADER_ContentDisposition = "Content-Disposition" + HEADER_LastModified = "Last-Modified" + HEADER_AcceptEncoding = "Accept-Encoding" + HEADER_ContentEncoding = "Content-Encoding" + HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" + HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" + HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" + HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" + HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" + HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" + HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" + HEADER_AccessControlMaxAge = "Access-Control-Max-Age" + + ENCODING_GZIP = "gzip" + ENCODING_DEFLATE = "deflate" +) diff --git a/vendor/github.com/emicklei/go-restful/v3/container.go b/vendor/github.com/emicklei/go-restful/v3/container.go new file mode 100644 index 000000000..dd56246dd --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/container.go @@ -0,0 +1,450 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "os" + "runtime" + "strings" + "sync" + + "github.com/emicklei/go-restful/v3/log" +) + +// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. 
+// The requests are further dispatched to routes of WebServices using a RouteSelector +type Container struct { + webServicesLock sync.RWMutex + webServices []*WebService + ServeMux *http.ServeMux + isRegisteredOnRoot bool + containerFilters []FilterFunction + doNotRecover bool // default is true + recoverHandleFunc RecoverHandleFunction + serviceErrorHandleFunc ServiceErrorHandleFunction + router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative) + contentEncodingEnabled bool // default is false +} + +// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter) +func NewContainer() *Container { + return &Container{ + webServices: []*WebService{}, + ServeMux: http.NewServeMux(), + isRegisteredOnRoot: false, + containerFilters: []FilterFunction{}, + doNotRecover: true, + recoverHandleFunc: logStackOnRecover, + serviceErrorHandleFunc: writeServiceError, + router: CurlyRouter{}, + contentEncodingEnabled: false} +} + +// RecoverHandleFunction declares functions that can be used to handle a panic situation. +// The first argument is what recover() returns. The second must be used to communicate an error response. +type RecoverHandleFunction func(interface{}, http.ResponseWriter) + +// RecoverHandler changes the default function (logStackOnRecover) to be called +// when a panic is detected. DoNotRecover must be have its default value (=false). +func (c *Container) RecoverHandler(handler RecoverHandleFunction) { + c.recoverHandleFunc = handler +} + +// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. +// The first argument is the service error, the second is the request that resulted in the error and +// the third must be used to communicate an error response. +type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) + +// ServiceErrorHandler changes the default function (writeServiceError) to be called +// when a ServiceError is detected. +func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { + c.serviceErrorHandleFunc = handler +} + +// DoNotRecover controls whether panics will be caught to return HTTP 500. +// If set to true, Route functions are responsible for handling any error situation. +// Default value is true. +func (c *Container) DoNotRecover(doNot bool) { + c.doNotRecover = doNot +} + +// Router changes the default Router (currently CurlyRouter) +func (c *Container) Router(aRouter RouteSelector) { + c.router = aRouter +} + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. +func (c *Container) EnableContentEncoding(enabled bool) { + c.contentEncodingEnabled = enabled +} + +// Add a WebService to the Container. It will detect duplicate root paths and exit in that case. 
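+//
+// A minimal registration sketch (u.findUser is a placeholder route function):
+//
+//    ws := new(WebService)
+//    ws.Path("/users").Consumes(MIME_JSON).Produces(MIME_JSON)
+//    ws.Route(ws.GET("/{user-id}").To(u.findUser))
+//    NewContainer().Add(ws)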
+func (c *Container) Add(service *WebService) *Container { + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() + + // if rootPath was not set then lazy initialize it + if len(service.rootPath) == 0 { + service.Path("/") + } + + // cannot have duplicate root paths + for _, each := range c.webServices { + if each.RootPath() == service.RootPath() { + log.Printf("WebService with duplicate root path detected:['%v']", each) + os.Exit(1) + } + } + + // If not registered on root then add specific mapping + if !c.isRegisteredOnRoot { + c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) + } + c.webServices = append(c.webServices, service) + return c +} + +// addHandler may set a new HandleFunc for the serveMux +// this function must run inside the critical region protected by the webServicesLock. +// returns true if the function was registered on root ("/") +func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { + pattern := fixedPrefixPath(service.RootPath()) + // check if root path registration is needed + if "/" == pattern || "" == pattern { + serveMux.HandleFunc("/", c.dispatch) + return true + } + // detect if registration already exists + alreadyMapped := false + for _, each := range c.webServices { + if each.RootPath() == service.RootPath() { + alreadyMapped = true + break + } + } + if !alreadyMapped { + serveMux.HandleFunc(pattern, c.dispatch) + if !strings.HasSuffix(pattern, "/") { + serveMux.HandleFunc(pattern+"/", c.dispatch) + } + } + return false +} + +func (c *Container) Remove(ws *WebService) error { + if c.ServeMux == http.DefaultServeMux { + errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) + log.Print(errMsg) + return errors.New(errMsg) + } + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() + // build a new ServeMux and re-register all WebServices + newServeMux := http.NewServeMux() + newServices := []*WebService{} + newIsRegisteredOnRoot := false + for _, each := range c.webServices { + if each.rootPath != ws.rootPath { + // If not registered on root then add specific mapping + if !newIsRegisteredOnRoot { + newIsRegisteredOnRoot = c.addHandler(each, newServeMux) + } + newServices = append(newServices, each) + } + } + c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot + return nil +} + +// logStackOnRecover is the default RecoverHandleFunction and is called +// when DoNotRecover is false and the recoverHandleFunc is not set for the container. +// Default implementation logs the stacktrace and writes the stacktrace on the response. +// This may be a security issue as it exposes sourcecode information. +func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { + var buffer bytes.Buffer + buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) + for i := 2; ; i += 1 { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) + } + log.Print(buffer.String()) + httpWriter.WriteHeader(http.StatusInternalServerError) + httpWriter.Write(buffer.Bytes()) +} + +// writeServiceError is the default ServiceErrorHandleFunction and is called +// when a ServiceError is returned during route selection. 
Default implementation +// calls resp.WriteErrorString(err.Code, err.Message) +func writeServiceError(err ServiceError, req *Request, resp *Response) { + for header, values := range err.Header { + for _, value := range values { + resp.Header().Add(header, value) + } + } + resp.WriteErrorString(err.Code, err.Message) +} + +// Dispatch the incoming Http Request to a matching WebService. +func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + if httpWriter == nil { + panic("httpWriter cannot be nil") + } + if httpRequest == nil { + panic("httpRequest cannot be nil") + } + c.dispatch(httpWriter, httpRequest) +} + +// Dispatch the incoming Http Request to a matching WebService. +func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // so we can assign a compressing one later + writer := httpWriter + + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + // Instal panic recovery unless told otherwise + if !c.doNotRecover { // catch all for 500 response + defer func() { + if r := recover(); r != nil { + c.recoverHandleFunc(r, writer) + return + } + }() + } + + // Find best match Route ; err is non nil if no match was found + var webService *WebService + var route *Route + var err error + func() { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + webService, route, err = c.router.SelectRoute( + c.webServices, + httpRequest) + }() + if err != nil { + // a non-200 response (may be compressed) has already been written + // run container filters anyway ; they should not touch the response... + chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { + switch err.(type) { + case ServiceError: + ser := err.(ServiceError) + c.serviceErrorHandleFunc(ser, req, resp) + } + // TODO + }} + chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) + return + } + + // Unless httpWriter is already an CompressingResponseWriter see if we need to install one + if _, isCompressing := httpWriter.(*CompressingResponseWriter); !isCompressing { + // Detect if compression is needed + // assume without compression, test for override + contentEncodingEnabled := c.contentEncodingEnabled + if route != nil && route.contentEncodingEnabled != nil { + contentEncodingEnabled = *route.contentEncodingEnabled + } + if contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + } + + pathProcessor, routerProcessesPath := c.router.(PathProcessor) + if !routerProcessesPath { + pathProcessor = defaultPathProcessor{} + } + pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) + wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) + // pass through filters (if any) + if size := len(c.containerFilters) + len(webService.filters) + len(route.Filters); size > 0 { + // compose filter chain + allFilters := make([]FilterFunction, 0, size) + allFilters = append(allFilters, c.containerFilters...) + allFilters = append(allFilters, webService.filters...) + allFilters = append(allFilters, route.Filters...) 
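+		// execute container, webservice and route filters in order,
+		// ending at the route function as the chain target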
+ chain := FilterChain{ + Filters: allFilters, + Target: route.Function, + ParameterDocs: route.ParameterDocs, + Operation: route.Operation, + } + chain.ProcessFilter(wrappedRequest, wrappedResponse) + } else { + // no filters, handle request by route + route.Function(wrappedRequest, wrappedResponse) + } +} + +// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} +func fixedPrefixPath(pathspec string) string { + varBegin := strings.Index(pathspec, "{") + if -1 == varBegin { + return pathspec + } + return pathspec[:varBegin] +} + +// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server +func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // Skip, if content encoding is disabled + if !c.contentEncodingEnabled { + c.ServeMux.ServeHTTP(httpWriter, httpRequest) + return + } + // content encoding is enabled + + // Skip, if httpWriter is already an CompressingResponseWriter + if _, ok := httpWriter.(*CompressingResponseWriter); ok { + c.ServeMux.ServeHTTP(httpWriter, httpRequest) + return + } + + writer := httpWriter + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + + c.ServeMux.ServeHTTP(writer, httpRequest) +} + +// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. +func (c *Container) Handle(pattern string, handler http.Handler) { + c.ServeMux.Handle(pattern, http.HandlerFunc(func(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // Skip, if httpWriter is already an CompressingResponseWriter + if _, ok := httpWriter.(*CompressingResponseWriter); ok { + handler.ServeHTTP(httpWriter, httpRequest) + return + } + + writer := httpWriter + + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + if c.contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + + handler.ServeHTTP(writer, httpRequest) + })) +} + +// HandleWithFilter registers the handler for the given pattern. +// Container's filter chain is applied for handler. +// If a handler already exists for pattern, HandleWithFilter panics. 
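+//
+// A sketch of serving a plain http.Handler through the container filters
+// (the pattern and handler are placeholders):
+//
+//    c := NewContainer()
+//    c.HandleWithFilter("/healthz", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//        w.Write([]byte("ok"))
+//    }))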
+func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { + f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { + if len(c.containerFilters) == 0 { + handler.ServeHTTP(httpResponse, httpRequest) + return + } + + chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { + handler.ServeHTTP(resp, req.Request) + }} + chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) + } + + c.Handle(pattern, http.HandlerFunc(f)) +} + +// Filter appends a container FilterFunction. These are called before dispatching +// a http.Request to a WebService from the container +func (c *Container) Filter(filter FilterFunction) { + c.containerFilters = append(c.containerFilters, filter) +} + +// RegisteredWebServices returns the collections of added WebServices +func (c *Container) RegisteredWebServices() []*WebService { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + result := make([]*WebService, len(c.webServices)) + for ix := range c.webServices { + result[ix] = c.webServices[ix] + } + return result +} + +// computeAllowedMethods returns a list of HTTP methods that are valid for a Request +func (c *Container) computeAllowedMethods(req *Request) []string { + // Go through all RegisteredWebServices() and all its Routes to collect the options + methods := []string{} + requestPath := req.Request.URL.Path + for _, ws := range c.RegisteredWebServices() { + matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) + if matches != nil { + finalMatch := matches[len(matches)-1] + for _, rt := range ws.Routes() { + matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) + if matches != nil { + lastMatch := matches[len(matches)-1] + if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. + methods = append(methods, rt.Method) + } + } + } + } + } + // methods = append(methods, "OPTIONS") not sure about this + return methods +} + +// newBasicRequestResponse creates a pair of Request,Response from its http versions. +// It is basic because no parameter or (produces) content-type information is given. +func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { + resp := NewResponse(httpWriter) + resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) + return NewRequest(httpRequest), resp +} diff --git a/vendor/github.com/emicklei/go-restful/v3/cors_filter.go b/vendor/github.com/emicklei/go-restful/v3/cors_filter.go new file mode 100644 index 000000000..9d18dfb7b --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/cors_filter.go @@ -0,0 +1,193 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "regexp" + "strconv" + "strings" +) + +// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. +// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page +// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from. +// +// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing +// http://enable-cors.org/server.html +// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request +type CrossOriginResourceSharing struct { + ExposeHeaders []string // list of Header names + + // AllowedHeaders is alist of Header names. Checking is case-insensitive. 
+ // The list may contain the special wildcard string ".*" ; all is allowed + AllowedHeaders []string + + // AllowedDomains is a list of allowed values for Http Origin. + // The list may contain the special wildcard string ".*" ; all is allowed + // If empty all are allowed. + AllowedDomains []string + + // AllowedDomainFunc is optional and is a function that will do the check + // when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*". + AllowedDomainFunc func(origin string) bool + + // AllowedMethods is either empty or has a list of http methods names. Checking is case-insensitive. + AllowedMethods []string + MaxAge int // number of seconds before requiring new Options request + CookiesAllowed bool + Container *Container + + allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. +} + +// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html +// and http://www.html5rocks.com/static/images/cors_server_flowchart.png +func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { + origin := req.Request.Header.Get(HEADER_Origin) + if len(origin) == 0 { + if trace { + traceLogger.Print("no Http header Origin set") + } + chain.ProcessFilter(req, resp) + return + } + if !c.isOriginAllowed(origin) { // check whether this origin is allowed + if trace { + traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) + } + chain.ProcessFilter(req, resp) + return + } + if req.Request.Method != "OPTIONS" { + c.doActualRequest(req, resp) + chain.ProcessFilter(req, resp) + return + } + if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { + c.doPreflightRequest(req, resp) + } else { + c.doActualRequest(req, resp) + chain.ProcessFilter(req, resp) + return + } +} + +func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { + c.setOptionsHeaders(req, resp) + // continue processing the response +} + +func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { + if len(c.AllowedMethods) == 0 { + if c.Container == nil { + c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) + } else { + c.AllowedMethods = c.Container.computeAllowedMethods(req) + } + } + + acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) + if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { + if trace { + traceLogger.Printf("Http header %s:%s is not in %v", + HEADER_AccessControlRequestMethod, + acrm, + c.AllowedMethods) + } + return + } + acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) + if len(acrhs) > 0 { + for _, each := range strings.Split(acrhs, ",") { + if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { + if trace { + traceLogger.Printf("Http header %s:%s is not in %v", + HEADER_AccessControlRequestHeaders, + acrhs, + c.AllowedHeaders) + } + return + } + } + } + resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) + resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) + c.setOptionsHeaders(req, resp) + + // return http 200 response, no body +} + +func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { + c.checkAndSetExposeHeaders(resp) + c.setAllowOriginHeader(req, resp) + c.checkAndSetAllowCredentials(resp) + if c.MaxAge > 0 { + resp.AddHeader(HEADER_AccessControlMaxAge, 
strconv.Itoa(c.MaxAge)) + } +} + +func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { + if len(origin) == 0 { + return false + } + lowerOrigin := strings.ToLower(origin) + if len(c.AllowedDomains) == 0 { + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(lowerOrigin) + } + return true + } + + // exact match on each allowed domain + for _, domain := range c.AllowedDomains { + if domain == ".*" || strings.ToLower(domain) == lowerOrigin { + return true + } + } + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(origin) + } + return false +} + +func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { + origin := req.Request.Header.Get(HEADER_Origin) + if c.isOriginAllowed(origin) { + resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) + } +} + +func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) { + if len(c.ExposeHeaders) > 0 { + resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) + } +} + +func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { + if c.CookiesAllowed { + resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") + } +} + +func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { + for _, each := range allowedMethods { + if each == method { + return true + } + } + return false +} + +func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { + for _, each := range c.AllowedHeaders { + if strings.ToLower(each) == strings.ToLower(header) { + return true + } + if each == "*" { + return true + } + } + return false +} diff --git a/vendor/github.com/emicklei/go-restful/v3/coverage.sh b/vendor/github.com/emicklei/go-restful/v3/coverage.sh new file mode 100644 index 000000000..e27dbf1a9 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/coverage.sh @@ -0,0 +1,2 @@ +go test -coverprofile=coverage.out +go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go new file mode 100644 index 000000000..ba1fc5d5f --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -0,0 +1,173 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "net/http" + "regexp" + "sort" + "strings" +) + +// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. +type CurlyRouter struct{} + +// SelectRoute is part of the Router interface and returns the best match +// for the WebService and its Route for the given Request. 
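+//
+// CurlyRouter is the default selector; a sketch of setting it explicitly on a
+// container (equivalent to what NewContainer already does):
+//
+//    c := NewContainer()
+//    c.Router(CurlyRouter{})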
+func (c CurlyRouter) SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { + + requestTokens := tokenizePath(httpRequest.URL.Path) + + detectedService := c.detectWebService(requestTokens, webServices) + if detectedService == nil { + if trace { + traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) + } + return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + candidateRoutes := c.selectRoutes(detectedService, requestTokens) + if len(candidateRoutes) == 0 { + if trace { + traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) + } + return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) + if selectedRoute == nil { + return detectedService, nil, err + } + return detectedService, selectedRoute, nil +} + +// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. +func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { + candidates := make(sortableCurlyRoutes, 0, 8) + for _, each := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) + if matches { + candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + } + } + sort.Sort(candidates) + return candidates +} + +// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. +func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) { + if len(routeTokens) < len(requestTokens) { + // proceed in matching only if last routeToken is wildcard + count := len(routeTokens) + if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { + return false, 0, 0 + } + // proceed + } + for i, routeToken := range routeTokens { + if i == len(requestTokens) { + // reached end of request path + return false, 0, 0 + } + requestToken := requestTokens[i] + if routeHasCustomVerb && hasCustomVerb(routeToken){ + if !isMatchCustomVerb(routeToken, requestToken) { + return false, 0, 0 + } + staticCount++ + requestToken = removeCustomVerb(requestToken) + routeToken = removeCustomVerb(routeToken) + } + + if strings.HasPrefix(routeToken, "{") { + paramCount++ + if colon := strings.Index(routeToken, ":"); colon != -1 { + // match by regex + matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) + if !matchesToken { + return false, 0, 0 + } + if matchesRemainder { + break + } + } + } else { // no { prefix + if requestToken != routeToken { + return false, 0, 0 + } + staticCount++ + } + } + return true, paramCount, staticCount +} + +// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens +// format routeToken is {someVar:someExpression}, e.g. 
{zipcode:[\d][\d][\d][\d][A-Z][A-Z]} +func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { + regPart := routeToken[colon+1 : len(routeToken)-1] + if regPart == "*" { + if trace { + traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) + } + return true, true + } + matched, err := regexp.MatchString(regPart, requestToken) + return (matched && err == nil), false +} + +var jsr311Router = RouterJSR311{} + +// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type +// headers of the Request. See also RouterJSR311 in jsr311.go +func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { + // tracing is done inside detectRoute + return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest) +} + +// detectWebService returns the best matching webService given the list of path tokens. +// see also computeWebserviceScore +func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { + var best *WebService + score := -1 + for _, each := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + if matches && (eachScore > score) { + best = each + score = eachScore + } + } + return best +} + +// computeWebserviceScore returns whether tokens match and +// the weighted score of the longest matching consecutive tokens from the beginning. +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { + if len(tokens) > len(requestTokens) { + return false, 0 + } + score := 0 + for i := 0; i < len(tokens); i++ { + each := requestTokens[i] + other := tokens[i] + if len(each) == 0 && len(other) == 0 { + score++ + continue + } + if len(other) > 0 && strings.HasPrefix(other, "{") { + // no empty match + if len(each) == 0 { + return false, score + } + score += 1 + } else { + // not a parameter + if each != other { + return false, score + } + score += (len(tokens) - i) * 10 //fuzzy + } + } + return true, score +} diff --git a/vendor/github.com/emicklei/go-restful/v3/curly_route.go b/vendor/github.com/emicklei/go-restful/v3/curly_route.go new file mode 100644 index 000000000..403dd3be9 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/curly_route.go @@ -0,0 +1,54 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. +type curlyRoute struct { + route Route + paramCount int + staticCount int +} + +// sortableCurlyRoutes orders by most parameters and path elements first. 
+type sortableCurlyRoutes []curlyRoute + +func (s *sortableCurlyRoutes) add(route curlyRoute) { + *s = append(*s, route) +} + +func (s sortableCurlyRoutes) routes() (routes []Route) { + routes = make([]Route, 0, len(s)) + for _, each := range s { + routes = append(routes, each.route) // TODO change return type + } + return routes +} + +func (s sortableCurlyRoutes) Len() int { + return len(s) +} +func (s sortableCurlyRoutes) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortableCurlyRoutes) Less(i, j int) bool { + a := s[j] + b := s[i] + + // primary key + if a.staticCount < b.staticCount { + return true + } + if a.staticCount > b.staticCount { + return false + } + // secundary key + if a.paramCount < b.paramCount { + return true + } + if a.paramCount > b.paramCount { + return false + } + return a.route.Path < b.route.Path +} diff --git a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go new file mode 100644 index 000000000..bfc17efde --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go @@ -0,0 +1,29 @@ +package restful + +import ( + "fmt" + "regexp" +) + +var ( + customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") +) + +func hasCustomVerb(routeToken string) bool { + return customVerbReg.MatchString(routeToken) +} + +func isMatchCustomVerb(routeToken string, pathToken string) bool { + rs := customVerbReg.FindStringSubmatch(routeToken) + if len(rs) < 2 { + return false + } + + customVerb := rs[1] + specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb)) + return specificVerbReg.MatchString(pathToken) +} + +func removeCustomVerb(str string) string { + return customVerbReg.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/emicklei/go-restful/v3/doc.go b/vendor/github.com/emicklei/go-restful/v3/doc.go new file mode 100644 index 000000000..69b13057d --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/doc.go @@ -0,0 +1,185 @@ +/* +Package restful , a lean package for creating REST-style WebServices without magic. + +WebServices and Routes + +A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. +Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. +WebServices must be added to a container (see below) in order to handler Http requests from a server. + +A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). +This package has the logic to find the best matching Route and if found, call its Function. + + ws := new(restful.WebService) + ws. + Path("/users"). + Consumes(restful.MIME_JSON, restful.MIME_XML). + Produces(restful.MIME_JSON, restful.MIME_XML) + + ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource + + ... + + // GET http://localhost:8080/users/1 + func (u UserResource) findUser(request *restful.Request, response *restful.Response) { + id := request.PathParameter("user-id") + ... + } + +The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. + +See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation. + +Regular expression matching Routes + +A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. 
+For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. +Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) +This feature requires the use of a CurlyRouter. + +Containers + +A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. +Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. +The Default container of go-restful uses the http.DefaultServeMux. +You can create your own Container and create a new http.Server for that particular container. + + container := restful.NewContainer() + server := &http.Server{Addr: ":8081", Handler: container} + +Filters + +A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. +You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. +In the restful package there are three hooks into the request,response flow where filters can be added. +Each filter must define a FilterFunction: + + func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) + +Use the following statement to pass the request,response pair to the next filter or RouteFunction + + chain.ProcessFilter(req, resp) + +Container Filters + +These are processed before any registered WebService. + + // install a (global) filter for the default container (processed before any webservice) + restful.Filter(globalLogging) + +WebService Filters + +These are processed before any Route of a WebService. + + // install a webservice filter (processed before any route) + ws.Filter(webserviceLogging).Filter(measureTime) + + +Route Filters + +These are processed before calling the function associated with the Route. + + // install 2 chained route filters (processed before calling findUser) + ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) + +See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations. + +Response Encoding + +Two encodings are supported: gzip and deflate. To enable this for all responses: + + restful.DefaultContainer.EnableContentEncoding(true) + +If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. +Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. + +See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go + +OPTIONS support + +By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. + + Filter(OPTIONSFilter()) + +CORS + +By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. + + cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} + Filter(cors.Filter) + +Error Handling + +Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. +For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. 
+ + 400: Bad Request + +If path or query parameters are not valid (content or type) then use http.StatusBadRequest. + + 404: Not Found + +Despite a valid URI, the resource requested may not be available + + 500: Internal Server Error + +If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. + + 405: Method Not Allowed + +The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. + + 406: Not Acceptable + +The request does not have or has an unknown Accept Header set for this operation. + + 415: Unsupported Media Type + +The request does not have or has an unknown Content-Type Header set for this operation. + +ServiceError + +In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. + +Performance options + +This package has several options that affect the performance of your service. It is important to understand them and how you can change it. + + restful.DefaultContainer.DoNotRecover(false) + +DoNotRecover controls whether panics will be caught to return HTTP 500. +If set to false, the container will recover from panics. +Default value is true + + restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) + +If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. +Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. + +Trouble shooting + +This package has the means to produce detail logging of the complete Http request matching process and filter invocation. +Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: + + restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) + +Logging + +The restful.SetLogger() method allows you to override the logger used by the package. By default restful +uses the standard library `log` package and logs to stdout. Different logging packages are supported as +long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your +preferred package is simple. + +Resources + +[project]: https://github.com/emicklei/go-restful + +[examples]: https://github.com/emicklei/go-restful/blob/master/examples + +[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ + +[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape + +(c) 2012-2015, http://ernestmicklei.com. MIT License +*/ +package restful diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go new file mode 100644 index 000000000..66dfc824f --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -0,0 +1,162 @@ +package restful + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "encoding/xml" + "strings" + "sync" +) + +// EntityReaderWriter can read and write values using an encoding such as JSON,XML. +type EntityReaderWriter interface { + // Read a serialized version of the value from the request. + // The Request may have a decompressing reader. Depends on Content-Encoding. + Read(req *Request, v interface{}) error + + // Write a serialized version of the value on the response. 
+ // The Response may have a compressing writer. Depends on Accept-Encoding. + // status should be a valid Http Status code + Write(resp *Response, status int, v interface{}) error +} + +// entityAccessRegistry is a singleton +var entityAccessRegistry = &entityReaderWriters{ + protection: new(sync.RWMutex), + accessors: map[string]EntityReaderWriter{}, +} + +// entityReaderWriters associates MIME to an EntityReaderWriter +type entityReaderWriters struct { + protection *sync.RWMutex + accessors map[string]EntityReaderWriter +} + +func init() { + RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) + RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) +} + +// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. +func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { + entityAccessRegistry.protection.Lock() + defer entityAccessRegistry.protection.Unlock() + entityAccessRegistry.accessors[mime] = erw +} + +// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. +// This package is already initialized with such an accessor using the MIME_JSON contentType. +func NewEntityAccessorJSON(contentType string) EntityReaderWriter { + return entityJSONAccess{ContentType: contentType} +} + +// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. +// This package is already initialized with such an accessor using the MIME_XML contentType. +func NewEntityAccessorXML(contentType string) EntityReaderWriter { + return entityXMLAccess{ContentType: contentType} +} + +// accessorAt returns the registered ReaderWriter for this MIME type. +func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { + r.protection.RLock() + defer r.protection.RUnlock() + er, ok := r.accessors[mime] + if !ok { + // retry with reverse lookup + // more expensive but we are in an exceptional situation anyway + for k, v := range r.accessors { + if strings.Contains(mime, k) { + return v, true + } + } + } + return er, ok +} + +// entityXMLAccess is a EntityReaderWriter for XML encoding +type entityXMLAccess struct { + // This is used for setting the Content-Type header when writing + ContentType string +} + +// Read unmarshalls the value from XML +func (e entityXMLAccess) Read(req *Request, v interface{}) error { + return xml.NewDecoder(req.Request.Body).Decode(v) +} + +// Write marshalls the value to JSON and set the Content-Type Header. +func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { + return writeXML(resp, status, e.ContentType, v) +} + +// writeXML marshalls the value to JSON and set the Content-Type Header. 
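+// The value is marshalled with encoding/xml; when prettyPrint is enabled the
+// output is indented and preceded by the standard xml.Header.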
+func writeXML(resp *Response, status int, contentType string, v interface{}) error { + if v == nil { + resp.WriteHeader(status) + // do not write a nil representation + return nil + } + if resp.prettyPrint { + // pretty output must be created and written explicitly + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + return err + } + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + _, err = resp.Write([]byte(xml.Header)) + if err != nil { + return err + } + _, err = resp.Write(output) + return err + } + // not-so-pretty + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + return xml.NewEncoder(resp).Encode(v) +} + +// entityJSONAccess is a EntityReaderWriter for JSON encoding +type entityJSONAccess struct { + // This is used for setting the Content-Type header when writing + ContentType string +} + +// Read unmarshalls the value from JSON +func (e entityJSONAccess) Read(req *Request, v interface{}) error { + decoder := NewDecoder(req.Request.Body) + decoder.UseNumber() + return decoder.Decode(v) +} + +// Write marshalls the value to JSON and set the Content-Type Header. +func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { + return writeJSON(resp, status, e.ContentType, v) +} + +// write marshalls the value to JSON and set the Content-Type Header. +func writeJSON(resp *Response, status int, contentType string, v interface{}) error { + if v == nil { + resp.WriteHeader(status) + // do not write a nil representation + return nil + } + if resp.prettyPrint { + // pretty output must be created and written explicitly + output, err := MarshalIndent(v, "", " ") + if err != nil { + return err + } + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + _, err = resp.Write(output) + return err + } + // not-so-pretty + resp.Header().Set(HEADER_ContentType, contentType) + resp.WriteHeader(status) + return NewEncoder(resp).Encode(v) +} diff --git a/vendor/github.com/emicklei/go-restful/v3/extensions.go b/vendor/github.com/emicklei/go-restful/v3/extensions.go new file mode 100644 index 000000000..5023fa049 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/extensions.go @@ -0,0 +1,21 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// ExtensionProperties provides storage of vendor extensions for entities +type ExtensionProperties struct { + // Extensions vendor extensions used to describe extra functionality + // (https://swagger.io/docs/specification/2-0/swagger-extensions/) + Extensions map[string]interface{} +} + +// AddExtension adds or updates a key=value pair to the extension map. +func (ep *ExtensionProperties) AddExtension(key string, value interface{}) { + if ep.Extensions == nil { + ep.Extensions = map[string]interface{}{key: value} + } else { + ep.Extensions[key] = value + } +} diff --git a/vendor/github.com/emicklei/go-restful/v3/filter.go b/vendor/github.com/emicklei/go-restful/v3/filter.go new file mode 100644 index 000000000..fd88c536c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/filter.go @@ -0,0 +1,37 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. 
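+// A FilterFunction typically does its work and then hands control back to the
+// chain. A minimal sketch (the timingFilter name and log call are illustrative):
+//
+//	func timingFilter(req *Request, resp *Response, chain *FilterChain) {
+//		start := time.Now()
+//		chain.ProcessFilter(req, resp) // next filter, or the RouteFunction
+//		log.Printf("%s took %v", req.Request.URL.Path, time.Since(start))
+//	}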
+type FilterChain struct { + Filters []FilterFunction // ordered list of FilterFunction + Index int // index into filters that is currently in progress + Target RouteFunction // function to call after passing all filters + ParameterDocs []*Parameter // the parameter docs for the route + Operation string // the name of the operation +} + +// ProcessFilter passes the request,response pair through the next of Filters. +// Each filter can decide to proceed to the next Filter or handle the Response itself. +func (f *FilterChain) ProcessFilter(request *Request, response *Response) { + if f.Index < len(f.Filters) { + f.Index++ + f.Filters[f.Index-1](request, response, f) + } else { + f.Target(request, response) + } +} + +// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction +type FilterFunction func(*Request, *Response, *FilterChain) + +// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching +// See examples/restful-no-cache-filter.go for usage +func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) { + resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. + resp.Header().Set("Pragma", "no-cache") // HTTP 1.0. + resp.Header().Set("Expires", "0") // Proxies. + chain.ProcessFilter(req, resp) +} diff --git a/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go b/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go new file mode 100644 index 000000000..c246512fc --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go @@ -0,0 +1,21 @@ +package restful + +import ( + "net/http" +) + +// HttpMiddlewareHandler is a function that takes a http.Handler and returns a http.Handler +type HttpMiddlewareHandler func(http.Handler) http.Handler + +// HttpMiddlewareHandlerToFilter converts a HttpMiddlewareHandler to a FilterFunction. 
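+// A minimal sketch of adapting a plain net/http middleware; "requestID" is a
+// hypothetical middleware, not part of this package:
+//
+//	requestID := func(next http.Handler) http.Handler {
+//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			w.Header().Set("X-Request-Id", "generated-id")
+//			next.ServeHTTP(w, r)
+//		})
+//	}
+//	ws.Filter(HttpMiddlewareHandlerToFilter(requestID))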
+func HttpMiddlewareHandlerToFilter(middleware HttpMiddlewareHandler) FilterFunction { + return func(req *Request, resp *Response, chain *FilterChain) { + next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + req.Request = r + resp.ResponseWriter = rw + chain.ProcessFilter(req, resp) + }) + + middleware(next).ServeHTTP(resp.ResponseWriter, req.Request) + } +} diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go new file mode 100644 index 000000000..871165166 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/json.go @@ -0,0 +1,11 @@ +// +build !jsoniter + +package restful + +import "encoding/json" + +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go new file mode 100644 index 000000000..11b8f8ae7 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go @@ -0,0 +1,12 @@ +// +build jsoniter + +package restful + +import "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go new file mode 100644 index 000000000..07a0c91e9 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -0,0 +1,326 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "errors" + "fmt" + "net/http" + "sort" + "strings" +) + +// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) +// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. +// RouterJSR311 implements the Router interface. +// Concept of locators is not implemented. +type RouterJSR311 struct{} + +// SelectRoute is part of the Router interface and returns the best match +// for the WebService and its Route for the given Request. +func (r RouterJSR311) SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { + + // Identify the root resource class (WebService) + dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) + if err != nil { + return nil, nil, NewError(http.StatusNotFound, "") + } + // Obtain the set of candidate methods (Routes) + routes := r.selectRoutes(dispatcher, finalMatch) + if len(routes) == 0 { + return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") + } + + // Identify the method (Route) that will handle the request + route, ok := r.detectRoute(routes, httpRequest) + return dispatcher, route, ok +} + +// ExtractParameters is used to obtain the path parameters from the route using the same matching +// engine as the JSR 311 router. 
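+// For example, with a WebService rooted at /users and a Route path
+// /users/{user-id}/orders/{order-id}, the urlPath "/users/7/orders/42" yields
+// the parameters user-id="7" and order-id="42" (illustrative values).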
+func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string { + webServiceExpr := webService.pathExpr + webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath) + pathParameters := r.extractParams(webServiceExpr, webServiceMatches) + routeExpr := route.pathExpr + routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1]) + routeParams := r.extractParams(routeExpr, routeMatches) + for key, value := range routeParams { + pathParameters[key] = value + } + return pathParameters +} + +func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string { + params := map[string]string{} + for i := 1; i < len(matches); i++ { + if len(pathExpr.VarNames) >= i { + params[pathExpr.VarNames[i-1]] = matches[i] + } + } + return params +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { + candidates := make([]*Route, 0, 8) + for i, each := range routes { + ok := true + for _, fn := range each.If { + if !fn(httpRequest) { + ok = false + break + } + } + if ok { + candidates = append(candidates, &routes[i]) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) + } + return nil, NewError(http.StatusNotFound, "404: Not Found") + } + + // http method + previous := candidates + candidates = candidates[:0] + for _, each := range previous { + if httpRequest.Method == each.Method { + candidates = append(candidates, each) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) + } + allowed := []string{} + allowedLoop: + for _, candidate := range previous { + for _, method := range allowed { + if method == candidate.Method { + continue allowedLoop + } + } + allowed = append(allowed, candidate.Method) + } + header := http.Header{"Allow": []string{strings.Join(allowed, ", ")}} + return nil, NewErrorWithHeader(http.StatusMethodNotAllowed, "405: Method Not Allowed", header) + } + + // content-type + contentType := httpRequest.Header.Get(HEADER_ContentType) + previous = candidates + candidates = candidates[:0] + for _, each := range previous { + if each.matchesContentType(contentType) { + candidates = append(candidates, each) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) + } + if httpRequest.ContentLength > 0 { + return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") + } + } + + // accept + previous = candidates + candidates = candidates[:0] + accept := httpRequest.Header.Get(HEADER_Accept) + if len(accept) == 0 { + accept = "*/*" + } + for _, each := range previous { + if each.matchesAccept(accept) { + candidates = append(candidates, each) + } + } + if len(candidates) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) + } + available := []string{} + for _, candidate := range previous { + available = append(available, candidate.Produces...) 
+ } + // if POST,PUT,PATCH without body + method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") + if (method == http.MethodPost || + method == http.MethodPut || + method == http.MethodPatch) && length == "" { + return nil, NewError( + http.StatusUnsupportedMediaType, + fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), + ) + } + return nil, NewError( + http.StatusNotAcceptable, + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), + ) + } + // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil + return candidates[0], nil +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 +// n/m > n/* > */* +func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { + // TODO + return &routes[0] +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) +func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { + filtered := &sortableRouteCandidates{} + for _, each := range dispatcher.Routes() { + pathExpr := each.pathExpr + matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) + if matches != nil { + lastMatch := matches[len(matches)-1] + if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. + filtered.candidates = append(filtered.candidates, + routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) + } + } + } + if len(filtered.candidates) == 0 { + if trace { + traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) + } + return []Route{} + } + sort.Sort(sort.Reverse(filtered)) + + // select other routes from candidates whoes expression matches rmatch + matchingRoutes := []Route{filtered.candidates[0].route} + for c := 1; c < len(filtered.candidates); c++ { + each := filtered.candidates[c] + if each.route.pathExpr.Matcher.MatchString(pathRemainder) { + matchingRoutes = append(matchingRoutes, each.route) + } + } + return matchingRoutes +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) +func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { + filtered := &sortableDispatcherCandidates{} + for _, each := range dispatchers { + matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) + if matches != nil { + filtered.candidates = append(filtered.candidates, + dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) + } + } + if len(filtered.candidates) == 0 { + if trace { + traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) + } + return nil, "", errors.New("not found") + } + sort.Sort(sort.Reverse(filtered)) + return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil +} + +// Types and functions to support the sorting of Routes + +type routeCandidate struct { + route Route + matchesCount int // the number of capturing groups + literalCount int // the number of literal characters (means those not resulting from template variable substitution) + nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) +} + +func (r routeCandidate) expressionToMatch() string { + return r.route.pathExpr.Source +} + +func (r routeCandidate) String() string { + return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) +} + +type sortableRouteCandidates struct { + candidates []routeCandidate +} + +func (rcs *sortableRouteCandidates) Len() int { + return len(rcs.candidates) +} +func (rcs *sortableRouteCandidates) Swap(i, j int) { + rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] +} +func (rcs *sortableRouteCandidates) Less(i, j int) bool { + ci := rcs.candidates[i] + cj := rcs.candidates[j] + // primary key + if ci.literalCount < cj.literalCount { + return true + } + if ci.literalCount > cj.literalCount { + return false + } + // secundary key + if ci.matchesCount < cj.matchesCount { + return true + } + if ci.matchesCount > cj.matchesCount { + return false + } + // tertiary key + if ci.nonDefaultCount < cj.nonDefaultCount { + return true + } + if ci.nonDefaultCount > cj.nonDefaultCount { + return false + } + // quaternary key ("source" is interpreted as Path) + return ci.route.Path < cj.route.Path +} + +// Types and functions to support the sorting of Dispatchers + +type dispatcherCandidate struct { + dispatcher *WebService + finalMatch string + matchesCount int // the number of capturing groups + literalCount int // the number of literal characters (means those not resulting from template variable substitution) + nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’) +} +type sortableDispatcherCandidates struct { + candidates []dispatcherCandidate +} + +func (dc *sortableDispatcherCandidates) Len() int { + return len(dc.candidates) +} +func (dc *sortableDispatcherCandidates) Swap(i, j int) { + dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] +} +func (dc *sortableDispatcherCandidates) Less(i, j int) bool { + ci := dc.candidates[i] + cj := dc.candidates[j] + // primary key + if ci.matchesCount < cj.matchesCount { + return true + } + if ci.matchesCount > cj.matchesCount { + return false + } + // secundary key + if ci.literalCount < cj.literalCount { + return true + } + if ci.literalCount > cj.literalCount { + return false + } + // tertiary key + return ci.nonDefaultCount < cj.nonDefaultCount +} diff --git a/vendor/github.com/emicklei/go-restful/v3/log/log.go b/vendor/github.com/emicklei/go-restful/v3/log/log.go new file mode 100644 index 000000000..6cd44c7a5 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/log/log.go @@ -0,0 +1,34 @@ +package log + +import ( + stdlog "log" + "os" +) + +// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger +type StdLogger interface { + Print(v ...interface{}) + Printf(format string, v ...interface{}) +} + +var Logger StdLogger + +func init() { + // default Logger + SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) +} + +// SetLogger sets the logger for this package +func SetLogger(customLogger StdLogger) { + Logger = customLogger +} + +// Print delegates to the Logger +func Print(v ...interface{}) { + Logger.Print(v...) +} + +// Printf delegates to the Logger +func Printf(format string, v ...interface{}) { + Logger.Printf(format, v...) 
+} diff --git a/vendor/github.com/emicklei/go-restful/v3/logger.go b/vendor/github.com/emicklei/go-restful/v3/logger.go new file mode 100644 index 000000000..29202726f --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/logger.go @@ -0,0 +1,32 @@ +package restful + +// Copyright 2014 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. +import ( + "github.com/emicklei/go-restful/v3/log" +) + +var trace bool = false +var traceLogger log.StdLogger + +func init() { + traceLogger = log.Logger // use the package logger by default +} + +// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. +// You may call EnableTracing() directly to enable trace logging to the package-wide logger. +func TraceLogger(logger log.StdLogger) { + traceLogger = logger + EnableTracing(logger != nil) +} + +// SetLogger exposes the setter for the global logger on the top-level package +func SetLogger(customLogger log.StdLogger) { + log.SetLogger(customLogger) +} + +// EnableTracing can be used to Trace logging on and off. +func EnableTracing(enabled bool) { + trace = enabled +} diff --git a/vendor/github.com/emicklei/go-restful/v3/mime.go b/vendor/github.com/emicklei/go-restful/v3/mime.go new file mode 100644 index 000000000..33014471b --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/mime.go @@ -0,0 +1,50 @@ +package restful + +import ( + "strconv" + "strings" +) + +type mime struct { + media string + quality float64 +} + +// insertMime adds a mime to a list and keeps it sorted by quality. +func insertMime(l []mime, e mime) []mime { + for i, each := range l { + // if current mime has lower quality then insert before + if e.quality > each.quality { + left := append([]mime{}, l[0:i]...) + return append(append(left, e), l[i:]...) + } + } + return append(l, e) +} + +const qFactorWeightingKey = "q" + +// sortedMimes returns a list of mime sorted (desc) by its specified quality. +// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3 +func sortedMimes(accept string) (sorted []mime) { + for _, each := range strings.Split(accept, ",") { + typeAndQuality := strings.Split(strings.Trim(each, " "), ";") + if len(typeAndQuality) == 1 { + sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) + } else { + // take factor + qAndWeight := strings.Split(typeAndQuality[1], "=") + if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey { + f, err := strconv.ParseFloat(qAndWeight[1], 64) + if err != nil { + traceLogger.Printf("unable to parse quality in %s, %v", each, err) + } else { + sorted = insertMime(sorted, mime{typeAndQuality[0], f}) + } + } else { + sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) + } + } + } + return +} diff --git a/vendor/github.com/emicklei/go-restful/v3/options_filter.go b/vendor/github.com/emicklei/go-restful/v3/options_filter.go new file mode 100644 index 000000000..5c1b34251 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/options_filter.go @@ -0,0 +1,34 @@ +package restful + +import "strings" + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method +// and provides the response with a set of allowed methods for the request URL Path. 
+// As for any filter, you can also install it for a particular WebService within a Container. +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). +func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { + if "OPTIONS" != req.Request.Method { + chain.ProcessFilter(req, resp) + return + } + + archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) + methods := strings.Join(c.computeAllowedMethods(req), ",") + origin := req.Request.Header.Get(HEADER_Origin) + + resp.AddHeader(HEADER_Allow, methods) + resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) + resp.AddHeader(HEADER_AccessControlAllowHeaders, archs) + resp.AddHeader(HEADER_AccessControlAllowMethods, methods) +} + +// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method +// and provides the response with a set of allowed methods for the request URL Path. +// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). +func OPTIONSFilter() FilterFunction { + return DefaultContainer.OPTIONSFilter +} diff --git a/vendor/github.com/emicklei/go-restful/v3/parameter.go b/vendor/github.com/emicklei/go-restful/v3/parameter.go new file mode 100644 index 000000000..0b851bb43 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/parameter.go @@ -0,0 +1,242 @@ +package restful + +import "sort" + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +const ( + // PathParameterKind = indicator of Request parameter type "path" + PathParameterKind = iota + + // QueryParameterKind = indicator of Request parameter type "query" + QueryParameterKind + + // BodyParameterKind = indicator of Request parameter type "body" + BodyParameterKind + + // HeaderParameterKind = indicator of Request parameter type "header" + HeaderParameterKind + + // FormParameterKind = indicator of Request parameter type "form" + FormParameterKind + + // MultiPartFormParameterKind = indicator of Request parameter type "multipart/form-data" + MultiPartFormParameterKind + + // CollectionFormatCSV comma separated values `foo,bar` + CollectionFormatCSV = CollectionFormat("csv") + + // CollectionFormatSSV space separated values `foo bar` + CollectionFormatSSV = CollectionFormat("ssv") + + // CollectionFormatTSV tab separated values `foo\tbar` + CollectionFormatTSV = CollectionFormat("tsv") + + // CollectionFormatPipes pipe separated values `foo|bar` + CollectionFormatPipes = CollectionFormat("pipes") + + // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single + // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters + CollectionFormatMulti = CollectionFormat("multi") +) + +type CollectionFormat string + +func (cf CollectionFormat) String() string { + return string(cf) +} + +// Parameter is for documententing the parameter used in a Http Request +// ParameterData kinds are Path,Query and Body +type Parameter struct { + data *ParameterData +} + +// ParameterData represents the state of a Parameter. +// It is made public to make it accessible to e.g. the Swagger package. +type ParameterData struct { + ExtensionProperties + Name, Description, DataType, DataFormat string + Kind int + Required bool + // AllowableValues is deprecated. 
Use PossibleValues instead + AllowableValues map[string]string + PossibleValues []string + AllowMultiple bool + AllowEmptyValue bool + DefaultValue string + CollectionFormat string + Pattern string + Minimum *float64 + Maximum *float64 + MinLength *int64 + MaxLength *int64 + MinItems *int64 + MaxItems *int64 + UniqueItems bool +} + +// Data returns the state of the Parameter +func (p *Parameter) Data() ParameterData { + return *p.data +} + +// Kind returns the parameter type indicator (see const for valid values) +func (p *Parameter) Kind() int { + return p.data.Kind +} + +func (p *Parameter) bePath() *Parameter { + p.data.Kind = PathParameterKind + return p +} +func (p *Parameter) beQuery() *Parameter { + p.data.Kind = QueryParameterKind + return p +} +func (p *Parameter) beBody() *Parameter { + p.data.Kind = BodyParameterKind + return p +} + +func (p *Parameter) beHeader() *Parameter { + p.data.Kind = HeaderParameterKind + return p +} + +func (p *Parameter) beForm() *Parameter { + p.data.Kind = FormParameterKind + return p +} + +func (p *Parameter) beMultiPartForm() *Parameter { + p.data.Kind = MultiPartFormParameterKind + return p +} + +// Required sets the required field and returns the receiver +func (p *Parameter) Required(required bool) *Parameter { + p.data.Required = required + return p +} + +// AllowMultiple sets the allowMultiple field and returns the receiver +func (p *Parameter) AllowMultiple(multiple bool) *Parameter { + p.data.AllowMultiple = multiple + return p +} + +// AddExtension adds or updates a key=value pair to the extension map +func (p *Parameter) AddExtension(key string, value interface{}) *Parameter { + p.data.AddExtension(key, value) + return p +} + +// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver +func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter { + p.data.AllowEmptyValue = multiple + return p +} + +// AllowableValues is deprecated. Use PossibleValues instead. Both will be set. 
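+// A minimal sketch; ws.QueryParameter, Param and the "list" handler belong to
+// the WebService/RouteBuilder API and are not defined in this file:
+//
+//	status := ws.QueryParameter("status", "filter by status").
+//		PossibleValues([]string{"active", "archived"})
+//	ws.Route(ws.GET("/").Param(status).To(list))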
+func (p *Parameter) AllowableValues(values map[string]string) *Parameter { + p.data.AllowableValues = values + + allowableSortedKeys := make([]string, 0, len(values)) + for k := range values { + allowableSortedKeys = append(allowableSortedKeys, k) + } + sort.Strings(allowableSortedKeys) + + p.data.PossibleValues = make([]string, 0, len(values)) + for _, k := range allowableSortedKeys { + p.data.PossibleValues = append(p.data.PossibleValues, values[k]) + } + return p +} + +// PossibleValues sets the possible values field and returns the receiver +func (p *Parameter) PossibleValues(values []string) *Parameter { + p.data.PossibleValues = values + return p +} + +// DataType sets the dataType field and returns the receiver +func (p *Parameter) DataType(typeName string) *Parameter { + p.data.DataType = typeName + return p +} + +// DataFormat sets the dataFormat field for Swagger UI +func (p *Parameter) DataFormat(formatName string) *Parameter { + p.data.DataFormat = formatName + return p +} + +// DefaultValue sets the default value field and returns the receiver +func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { + p.data.DefaultValue = stringRepresentation + return p +} + +// Description sets the description value field and returns the receiver +func (p *Parameter) Description(doc string) *Parameter { + p.data.Description = doc + return p +} + +// CollectionFormat sets the collection format for an array type +func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { + p.data.CollectionFormat = format.String() + return p +} + +// Pattern sets the pattern field and returns the receiver +func (p *Parameter) Pattern(pattern string) *Parameter { + p.data.Pattern = pattern + return p +} + +// Minimum sets the minimum field and returns the receiver +func (p *Parameter) Minimum(minimum float64) *Parameter { + p.data.Minimum = &minimum + return p +} + +// Maximum sets the maximum field and returns the receiver +func (p *Parameter) Maximum(maximum float64) *Parameter { + p.data.Maximum = &maximum + return p +} + +// MinLength sets the minLength field and returns the receiver +func (p *Parameter) MinLength(minLength int64) *Parameter { + p.data.MinLength = &minLength + return p +} + +// MaxLength sets the maxLength field and returns the receiver +func (p *Parameter) MaxLength(maxLength int64) *Parameter { + p.data.MaxLength = &maxLength + return p +} + +// MinItems sets the minItems field and returns the receiver +func (p *Parameter) MinItems(minItems int64) *Parameter { + p.data.MinItems = &minItems + return p +} + +// MaxItems sets the maxItems field and returns the receiver +func (p *Parameter) MaxItems(maxItems int64) *Parameter { + p.data.MaxItems = &maxItems + return p +} + +// UniqueItems sets the uniqueItems field and returns the receiver +func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter { + p.data.UniqueItems = uniqueItems + return p +} diff --git a/vendor/github.com/emicklei/go-restful/v3/path_expression.go b/vendor/github.com/emicklei/go-restful/v3/path_expression.go new file mode 100644 index 000000000..95a9a2545 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/path_expression.go @@ -0,0 +1,74 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
+ +import ( + "bytes" + "fmt" + "regexp" + "strings" +) + +// PathExpression holds a compiled path expression (RegExp) needed to match against +// Http request paths and to extract path parameter values. +type pathExpression struct { + LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) + VarNames []string // the names of parameters (enclosed by {}) in the path + VarCount int // the number of named parameters (enclosed by {}) in the path + Matcher *regexp.Regexp + Source string // Path as defined by the RouteBuilder + tokens []string +} + +// NewPathExpression creates a PathExpression from the input URL path. +// Returns an error if the path is invalid. +func newPathExpression(path string) (*pathExpression, error) { + expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path) + compiled, err := regexp.Compile(expression) + if err != nil { + return nil, err + } + return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil +} + +// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 +func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { + var buffer bytes.Buffer + buffer.WriteString("^") + //tokens = strings.Split(template, "/") + tokens = tokenizePath(template) + for _, each := range tokens { + if each == "" { + continue + } + buffer.WriteString("/") + if strings.HasPrefix(each, "{") { + // check for regular expression in variable + colon := strings.Index(each, ":") + var varName string + if colon != -1 { + // extract expression + varName = strings.TrimSpace(each[1:colon]) + paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) + if paramExpr == "*" { // special case + buffer.WriteString("(.*)") + } else { + buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache + } + } else { + // plain var + varName = strings.TrimSpace(each[1 : len(each)-1]) + buffer.WriteString("([^/]+?)") + } + varNames = append(varNames, varName) + varCount += 1 + } else { + literalCount += len(each) + encoded := each // TODO URI encode + buffer.WriteString(regexp.QuoteMeta(encoded)) + } + } + return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens +} diff --git a/vendor/github.com/emicklei/go-restful/v3/path_processor.go b/vendor/github.com/emicklei/go-restful/v3/path_processor.go new file mode 100644 index 000000000..141573245 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/path_processor.go @@ -0,0 +1,74 @@ +package restful + +import ( + "bytes" + "strings" +) + +// Copyright 2018 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. +// If a Router does not implement this interface then the default behaviour will be used. 
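+// For example, with the default processor a route path /users/{id:*} and a
+// request path /users/7/avatar produce id="7/avatar" (the {var:*} form captures
+// the remaining path segments), while /users/{id} produces id="7".
+// (Illustrative values; see defaultPathProcessor below.)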
+type PathProcessor interface { + // ExtractParameters gets the path parameters defined in the route and webService from the urlPath + ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string +} + +type defaultPathProcessor struct{} + +// Extract the parameters from the request url path +func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string { + urlParts := tokenizePath(urlPath) + pathParameters := map[string]string{} + for i, key := range r.pathParts { + var value string + if i >= len(urlParts) { + value = "" + } else { + value = urlParts[i] + } + if r.hasCustomVerb && hasCustomVerb(key) { + key = removeCustomVerb(key) + value = removeCustomVerb(value) + } + + if strings.Index(key, "{") > -1 { // path-parameter + if colon := strings.Index(key, ":"); colon != -1 { + // extract by regex + regPart := key[colon+1 : len(key)-1] + keyPart := key[1:colon] + if regPart == "*" { + pathParameters[keyPart] = untokenizePath(i, urlParts) + break + } else { + pathParameters[keyPart] = value + } + } else { + // without enclosing {} + startIndex := strings.Index(key, "{") + endKeyIndex := strings.Index(key, "}") + + suffixLength := len(key) - endKeyIndex - 1 + endValueIndex := len(value) - suffixLength + + pathParameters[key[startIndex+1:endKeyIndex]] = value[startIndex:endValueIndex] + } + } + } + return pathParameters +} + +// Untokenize back into an URL path using the slash separator +func untokenizePath(offset int, parts []string) string { + var buffer bytes.Buffer + for p := offset; p < len(parts); p++ { + buffer.WriteString(parts[p]) + // do not end + if p < len(parts)-1 { + buffer.WriteString("/") + } + } + return buffer.String() +} diff --git a/vendor/github.com/emicklei/go-restful/v3/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go new file mode 100644 index 000000000..0020095e8 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -0,0 +1,133 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "compress/zlib" + "net/http" +) + +var defaultRequestContentType string + +// Request is a wrapper for a http Request that provides convenience methods +type Request struct { + Request *http.Request + pathParameters map[string]string + attributes map[string]interface{} // for storing request-scoped values + selectedRoute *Route // is nil when no route was matched +} + +func NewRequest(httpRequest *http.Request) *Request { + return &Request{ + Request: httpRequest, + pathParameters: map[string]string{}, + attributes: map[string]interface{}{}, + } // empty parameters, attributes +} + +// If ContentType is missing or */* is given then fall back to this type, otherwise +// a "Unable to unmarshal content of type:" response is returned. 
+// Valid values are restful.MIME_JSON and restful.MIME_XML +// Example: +// +// restful.DefaultRequestContentType(restful.MIME_JSON) +func DefaultRequestContentType(mime string) { + defaultRequestContentType = mime +} + +// PathParameter accesses the Path parameter value by its name +func (r *Request) PathParameter(name string) string { + return r.pathParameters[name] +} + +// PathParameters accesses the Path parameter values +func (r *Request) PathParameters() map[string]string { + return r.pathParameters +} + +// QueryParameter returns the (first) Query parameter value by its name +func (r *Request) QueryParameter(name string) string { + return r.Request.URL.Query().Get(name) +} + +// QueryParameters returns the all the query parameters values by name +func (r *Request) QueryParameters(name string) []string { + return r.Request.URL.Query()[name] +} + +// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. +func (r *Request) BodyParameter(name string) (string, error) { + err := r.Request.ParseForm() + if err != nil { + return "", err + } + return r.Request.PostFormValue(name), nil +} + +// HeaderParameter returns the HTTP Header value of a Header name or empty if missing +func (r *Request) HeaderParameter(name string) string { + return r.Request.Header.Get(name) +} + +// ReadEntity checks the Accept header and reads the content into the entityPointer. +func (r *Request) ReadEntity(entityPointer interface{}) (err error) { + contentType := r.Request.Header.Get(HEADER_ContentType) + contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) + + // check if the request body needs decompression + if ENCODING_GZIP == contentEncoding { + gzipReader := currentCompressorProvider.AcquireGzipReader() + defer currentCompressorProvider.ReleaseGzipReader(gzipReader) + gzipReader.Reset(r.Request.Body) + r.Request.Body = gzipReader + } else if ENCODING_DEFLATE == contentEncoding { + zlibReader, err := zlib.NewReader(r.Request.Body) + if err != nil { + return err + } + r.Request.Body = zlibReader + } + + // lookup the EntityReader, use defaultRequestContentType if needed and provided + entityReader, ok := entityAccessRegistry.accessorAt(contentType) + if !ok { + if len(defaultRequestContentType) != 0 { + entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType) + } + if !ok { + return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) + } + } + return entityReader.Read(r, entityPointer) +} + +// SetAttribute adds or replaces the attribute with the given value. +func (r *Request) SetAttribute(name string, value interface{}) { + r.attributes[name] = value +} + +// Attribute returns the value associated to the given name. Returns nil if absent. +func (r Request) Attribute(name string) interface{} { + return r.attributes[name] +} + +// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees +// If no route was matched then return an empty string. +func (r Request) SelectedRoutePath() string { + if r.selectedRoute == nil { + return "" + } + // skip creating an accessor + return r.selectedRoute.Path +} + +// SelectedRoute returns a reader to access the selected Route by the container +// Returns nil if no route was matched. 
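+// A minimal sketch inside a route-level FilterFunction (the auditFilter name
+// and log call are illustrative):
+//
+//	func auditFilter(req *Request, resp *Response, chain *FilterChain) {
+//		if req.SelectedRoute() != nil {
+//			log.Printf("dispatching %s", req.SelectedRoutePath())
+//		}
+//		chain.ProcessFilter(req, resp)
+//	}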
+func (r Request) SelectedRoute() RouteReader { + if r.selectedRoute == nil { + return nil + } + return routeAccessor{route: r.selectedRoute} +} diff --git a/vendor/github.com/emicklei/go-restful/v3/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go new file mode 100644 index 000000000..a41a92cc2 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -0,0 +1,259 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bufio" + "errors" + "net" + "net/http" +) + +// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime) +var DefaultResponseMimeType string + +//PrettyPrintResponses controls the indentation feature of XML and JSON serialization +var PrettyPrintResponses = true + +// Response is a wrapper on the actual http ResponseWriter +// It provides several convenience methods to prepare and write response content. +type Response struct { + http.ResponseWriter + requestAccept string // mime-type what the Http Request says it wants to receive + routeProduces []string // mime-types what the Route says it can produce + statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) + contentLength int // number of bytes written for the response body + prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. + err error // err property is kept when WriteError is called + hijacker http.Hijacker // if underlying ResponseWriter supports it +} + +// NewResponse creates a new response based on a http ResponseWriter. +func NewResponse(httpWriter http.ResponseWriter) *Response { + hijacker, _ := httpWriter.(http.Hijacker) + return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker} +} + +// DefaultResponseContentType set a default. +// If Accept header matching fails, fall back to this type. +// Valid values are restful.MIME_JSON and restful.MIME_XML +// Example: +// restful.DefaultResponseContentType(restful.MIME_JSON) +func DefaultResponseContentType(mime string) { + DefaultResponseMimeType = mime +} + +// InternalServerError writes the StatusInternalServerError header. +// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) +func (r Response) InternalServerError() Response { + r.WriteHeader(http.StatusInternalServerError) + return r +} + +// Hijack implements the http.Hijacker interface. This expands +// the Response to fulfill http.Hijacker if the underlying +// http.ResponseWriter supports it. +func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if r.hijacker == nil { + return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter") + } + return r.hijacker.Hijack() +} + +// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. +func (r *Response) PrettyPrint(bePretty bool) { + r.prettyPrint = bePretty +} + +// AddHeader is a shortcut for .Header().Add(header,value) +func (r Response) AddHeader(header string, value string) Response { + r.Header().Add(header, value) + return r +} + +// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. 
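+// A minimal test sketch using net/http/httptest:
+//
+//	resp := NewResponse(httptest.NewRecorder())
+//	resp.SetRequestAccepts(MIME_JSON)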
+func (r *Response) SetRequestAccepts(mime string) { + r.requestAccept = mime +} + +// EntityWriter returns the registered EntityWriter that the entity (requested resource) +// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. +// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. +func (r *Response) EntityWriter() (EntityReaderWriter, bool) { + sorted := sortedMimes(r.requestAccept) + for _, eachAccept := range sorted { + for _, eachProduce := range r.routeProduces { + if eachProduce == eachAccept.media { + if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { + return w, true + } + } + } + if eachAccept.media == "*/*" { + for _, each := range r.routeProduces { + if w, ok := entityAccessRegistry.accessorAt(each); ok { + return w, true + } + } + } + } + // if requestAccept is empty + writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) + if !ok { + // if not registered then fallback to the defaults (if set) + if DefaultResponseMimeType == MIME_JSON { + return entityAccessRegistry.accessorAt(MIME_JSON) + } + if DefaultResponseMimeType == MIME_XML { + return entityAccessRegistry.accessorAt(MIME_XML) + } + if DefaultResponseMimeType == MIME_ZIP { + return entityAccessRegistry.accessorAt(MIME_ZIP) + } + // Fallback to whatever the route says it can produce. + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + for _, each := range r.routeProduces { + if w, ok := entityAccessRegistry.accessorAt(each); ok { + return w, true + } + } + if trace { + traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) + } + } + return writer, ok +} + +// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) +func (r *Response) WriteEntity(value interface{}) error { + return r.WriteHeaderAndEntity(http.StatusOK, value) +} + +// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. +// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. +// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. +// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. +// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. +// Current implementation ignores any q-parameters in the Accept Header. +// Returns an error if the value could not be written on the response. +func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { + writer, ok := r.EntityWriter() + if !ok { + r.WriteHeader(http.StatusNotAcceptable) + return nil + } + return writer.Write(r, status, value) +} + +// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. 
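+// A minimal sketch; "User" is a hypothetical struct with xml tags:
+//
+//	type User struct {
+//		Name string `xml:"name"`
+//	}
+//	err := resp.WriteAsXml(User{Name: "bob"}) // roughly <User><name>bob</name></User>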
+func (r *Response) WriteAsXml(value interface{}) error { + return writeXML(r, http.StatusOK, MIME_XML, value) +} + +// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) +// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { + return writeXML(r, status, MIME_XML, value) +} + +// WriteAsJson is a convenience method for writing a value in json. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteAsJson(value interface{}) error { + return writeJSON(r, http.StatusOK, MIME_JSON, value) +} + +// WriteJson is a convenience method for writing a value in Json with a given Content-Type. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteJson(value interface{}, contentType string) error { + return writeJSON(r, http.StatusOK, contentType, value) +} + +// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. +// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. +func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { + return writeJSON(r, status, contentType, value) +} + +// WriteError writes the http status and the error string on the response. err can be nil. +// Return an error if writing was not successful. +func (r *Response) WriteError(httpStatus int, err error) (writeErr error) { + r.err = err + if err == nil { + writeErr = r.WriteErrorString(httpStatus, "") + } else { + writeErr = r.WriteErrorString(httpStatus, err.Error()) + } + return writeErr +} + +// WriteServiceError is a convenience method for a responding with a status and a ServiceError +func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { + r.err = err + return r.WriteHeaderAndEntity(httpStatus, err) +} + +// WriteErrorString is a convenience method for an error status with the actual error +func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { + if r.err == nil { + // if not called from WriteError + r.err = errors.New(errorReason) + } + r.WriteHeader(httpStatus) + if _, err := r.Write([]byte(errorReason)); err != nil { + return err + } + return nil +} + +// Flush implements http.Flusher interface, which sends any buffered data to the client. +func (r *Response) Flush() { + if f, ok := r.ResponseWriter.(http.Flusher); ok { + f.Flush() + } else if trace { + traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) + } +} + +// WriteHeader is overridden to remember the Status Code that has been written. +// Changes to the Header of the response have no effect after this. +func (r *Response) WriteHeader(httpStatus int) { + r.statusCode = httpStatus + r.ResponseWriter.WriteHeader(httpStatus) +} + +// StatusCode returns the code that has been written using WriteHeader. +func (r Response) StatusCode() int { + if 0 == r.statusCode { + // no status code has been written yet; assume OK + return http.StatusOK + } + return r.statusCode +} + +// Write writes the data to the connection as part of an HTTP reply. +// Write is part of http.ResponseWriter interface. 
+func (r *Response) Write(bytes []byte) (int, error) { + written, err := r.ResponseWriter.Write(bytes) + r.contentLength += written + return written, err +} + +// ContentLength returns the number of bytes written for the response content. +// Note that this value is only correct if all data is written through the Response using its Write* methods. +// Data written directly using the underlying http.ResponseWriter is not accounted for. +func (r Response) ContentLength() int { + return r.contentLength +} + +// CloseNotify is part of http.CloseNotifier interface +func (r Response) CloseNotify() <-chan bool { + return r.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// Error returns the err created by WriteError +func (r Response) Error() error { + return r.err +} diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go new file mode 100644 index 000000000..306c44be7 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -0,0 +1,191 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "net/http" + "strings" +) + +// RouteFunction declares the signature of a function that can be bound to a Route. +type RouteFunction func(*Request, *Response) + +// RouteSelectionConditionFunction declares the signature of a function that +// can be used to add extra conditional logic when selecting whether the route +// matches the HTTP request. +type RouteSelectionConditionFunction func(httpRequest *http.Request) bool + +// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. +type Route struct { + ExtensionProperties + Method string + Produces []string + Consumes []string + Path string // webservice root path + described path + Function RouteFunction + Filters []FilterFunction + If []RouteSelectionConditionFunction + + // cached values for dispatching + relativePath string + pathParts []string + pathExpr *pathExpression // cached compilation of relativePath as RegExp + + // documentation + Doc string + Notes string + Operation string + ParameterDocs []*Parameter + ResponseErrors map[int]ResponseError + DefaultResponse *ResponseError + ReadSample, WriteSample interface{} // structs that model an example request or response payload + WriteSamples []interface{} // if more than one return types is possible (oneof) then this will contain multiple values + + // Extra information used to store custom information about the route. + Metadata map[string]interface{} + + // marks a route as deprecated + Deprecated bool + + //Overrides the container.contentEncodingEnabled + contentEncodingEnabled *bool + + // indicate route path has custom verb + hasCustomVerb bool + + // if a request does not include a content-type header then + // depending on the method, it may return a 415 Unsupported Media + // Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... 
+ allowedMethodsWithoutContentType []string +} + +// Initialize for Route +func (r *Route) postBuild() { + r.pathParts = tokenizePath(r.Path) + r.hasCustomVerb = hasCustomVerb(r.Path) +} + +// Create Request and Response from their http versions +func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { + wrappedRequest := NewRequest(httpRequest) + wrappedRequest.pathParameters = pathParams + wrappedRequest.selectedRoute = r + wrappedResponse := NewResponse(httpWriter) + wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) + wrappedResponse.routeProduces = r.Produces + return wrappedRequest, wrappedResponse +} + +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +// Return whether the mimeType matches to what this Route can produce. +func (r Route) matchesAccept(mimeTypesWithQuality string) bool { + remaining := mimeTypesWithQuality + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" + } else { + mimeType, remaining = remaining[:end], remaining[end+1:] + } + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] + } + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) + if mimeType == "*/*" { + return true + } + for _, producibleType := range r.Produces { + if producibleType == "*/*" || producibleType == mimeType { + return true + } + } + if len(remaining) == 0 { + return false + } + } +} + +// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). +func (r Route) matchesContentType(mimeTypes string) bool { + + if len(r.Consumes) == 0 { + // did not specify what it can consume ; any media type (“*/*”) is assumed + return true + } + + if len(mimeTypes) == 0 { + // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type + m := r.Method + // if route specifies less or non-idempotent methods then use that + if len(r.allowedMethodsWithoutContentType) > 0 { + for _, each := range r.allowedMethodsWithoutContentType { + if m == each { + return true + } + } + } else { + if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { + return true + } + } + // proceed with default + mimeTypes = MIME_OCTET + } + + remaining := mimeTypes + for { + var mimeType string + if end := strings.Index(remaining, ","); end == -1 { + mimeType, remaining = remaining, "" + } else { + mimeType, remaining = remaining[:end], remaining[end+1:] + } + if quality := strings.Index(mimeType, ";"); quality != -1 { + mimeType = mimeType[:quality] + } + mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) + for _, consumeableType := range r.Consumes { + if consumeableType == "*/*" || consumeableType == mimeType { + return true + } + } + if len(remaining) == 0 { + return false + } + } +} + +// Tokenize an URL path using the slash separator ; the result does not have empty tokens +func tokenizePath(path string) []string { + if "/" == path { + return nil + } + if TrimRightSlashEnabled { + // 3.9.0 + return strings.Split(strings.Trim(path, "/"), "/") + } else { + // 3.10.2 + return strings.Split(strings.TrimLeft(path, "/"), "/") + } +} + +// for debugging +func (r *Route) String() string { + return r.Method + " " + r.Path +} + +// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. 
+func (r *Route) EnableContentEncoding(enabled bool) { + r.contentEncodingEnabled = &enabled +} + +// TrimRightSlashEnabled controls whether +// - path on route building is using path.Join +// - the path of the incoming request is trimmed of its slash suffux. +// Value of true matches the behavior of <= 3.9.0 +var TrimRightSlashEnabled = true diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go new file mode 100644 index 000000000..75168c12e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -0,0 +1,389 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "fmt" + "os" + "path" + "reflect" + "runtime" + "strings" + "sync/atomic" + + "github.com/emicklei/go-restful/v3/log" +) + +// RouteBuilder is a helper to construct Routes. +type RouteBuilder struct { + rootPath string + currentPath string + produces []string + consumes []string + httpMethod string // required + function RouteFunction // required + filters []FilterFunction + conditions []RouteSelectionConditionFunction + allowedMethodsWithoutContentType []string // see Route + + typeNameHandleFunc TypeNameHandleFunction // required + + // documentation + doc string + notes string + operation string + readSample interface{} + writeSamples []interface{} + parameters []*Parameter + errorMap map[int]ResponseError + defaultResponse *ResponseError + metadata map[string]interface{} + extensions map[string]interface{} + deprecated bool + contentEncodingEnabled *bool +} + +// Do evaluates each argument with the RouteBuilder itself. +// This allows you to follow DRY principles without breaking the fluent programming style. +// Example: +// +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } +func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { + for _, each := range oneArgBlocks { + each(b) + } + return b +} + +// To bind the route to a function. +// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. +func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { + b.function = function + return b +} + +// Method specifies what HTTP method to match. Required. +func (b *RouteBuilder) Method(method string) *RouteBuilder { + b.httpMethod = method + return b +} + +// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. +func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { + b.produces = mimeTypes + return b +} + +// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these +func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { + b.consumes = mimeTypes + return b +} + +// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". +func (b *RouteBuilder) Path(subPath string) *RouteBuilder { + b.currentPath = subPath + return b +} + +// Doc tells what this route is all about. Optional. +func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { + b.doc = documentation + return b +} + +// Notes is a verbose explanation of the operation behavior. Optional. 
+func (b *RouteBuilder) Notes(notes string) *RouteBuilder { + b.notes = notes + return b +} + +// Reads tells what resource type will be read from the request payload. Optional. +// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. +func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder { + fn := b.typeNameHandleFunc + if fn == nil { + fn = reflectTypeName + } + typeAsName := fn(sample) + description := "" + if len(optionalDescription) > 0 { + description = optionalDescription[0] + } + b.readSample = sample + bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}} + bodyParameter.beBody() + bodyParameter.Required(true) + bodyParameter.DataType(typeAsName) + b.Param(bodyParameter) + return b +} + +// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. +// Use this to modify or extend information for the Parameter (through its Data()). +func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { + for _, each := range b.parameters { + if each.Data().Name == name { + return each + } + } + return p +} + +// Writes tells which one of the resource types will be written as the response payload. Optional. +func (b *RouteBuilder) Writes(samples ...interface{}) *RouteBuilder { + b.writeSamples = samples // oneof + return b +} + +// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). +func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { + if b.parameters == nil { + b.parameters = []*Parameter{} + } + b.parameters = append(b.parameters, parameter) + return b +} + +// Operation allows you to document what the actual method/function call is of the Route. +// Unless called, the operation name is derived from the RouteFunction set using To(..). +func (b *RouteBuilder) Operation(name string) *RouteBuilder { + b.operation = name + return b +} + +// ReturnsError is deprecated, use Returns instead. +func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { + log.Print("ReturnsError is deprecated, use Returns instead.") + return b.Returns(code, message, model) +} + +// Returns allows you to document what responses (errors or regular) can be expected. +// The model parameter is optional ; either pass a struct instance or use nil if not applicable. +func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { + err := ResponseError{ + Code: code, + Message: message, + Model: model, + IsDefault: false, // this field is deprecated, use default response instead. + } + // lazy init because there is no NewRouteBuilder (yet) + if b.errorMap == nil { + b.errorMap = map[int]ResponseError{} + } + b.errorMap[code] = err + return b +} + +// ReturnsWithHeaders is similar to Returns, but can specify response headers +func (b *RouteBuilder) ReturnsWithHeaders(code int, message string, model interface{}, headers map[string]Header) *RouteBuilder { + b.Returns(code, message, model) + err := b.errorMap[code] + err.Headers = headers + b.errorMap[code] = err + return b +} + +// DefaultReturns is a special Returns call that sets the default of the response. 
+func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { + b.defaultResponse = &ResponseError{ + Message: message, + Model: model, + } + return b +} + +// Metadata adds or updates a key=value pair to the metadata map. +func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { + if b.metadata == nil { + b.metadata = map[string]interface{}{} + } + b.metadata[key] = value + return b +} + +// AddExtension adds or updates a key=value pair to the extensions map. +func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder { + if b.extensions == nil { + b.extensions = map[string]interface{}{} + } + b.extensions[key] = value + return b +} + +// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use +func (b *RouteBuilder) Deprecate() *RouteBuilder { + b.deprecated = true + return b +} + +// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE +// If a request does not include a content-type header then +// depending on the method, it may return a 415 Unsupported Media. +// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... +func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *RouteBuilder { + b.allowedMethodsWithoutContentType = methods + return b +} + +// ResponseError represents a response; not necessarily an error. +type ResponseError struct { + ExtensionProperties + Code int + Message string + Model interface{} + Headers map[string]Header + IsDefault bool +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + *Items + Description string +} + +// Items describe swagger simple schemas for headers +type Items struct { + Type string + Format string + Items *Items + CollectionFormat string + Default interface{} +} + +func (b *RouteBuilder) servicePath(path string) *RouteBuilder { + b.rootPath = path + return b +} + +// Filter appends a FilterFunction to the end of filters for this Route to build. +func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { + b.filters = append(b.filters, filter) + return b +} + +// If sets a condition function that controls matching the Route based on custom logic. +// The condition function is provided the HTTP request and should return true if the route +// should be considered. +// +// Efficiency note: the condition function is called before checking the method, produces, and +// consumes criteria, so that the correct HTTP status code can be returned. +// +// Lifecycle note: no filter functions have been called prior to calling the condition function, +// so the condition function should not depend on any context that might be set up by container +// or route filters. +func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder { + b.conditions = append(b.conditions, condition) + return b +} + +// ContentEncodingEnabled allows you to override the Containers value for auto-compressing this route response. 
+func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder { + b.contentEncodingEnabled = &enabled + return b +} + +// If no specific Route path then set to rootPath +// If no specific Produces then set to rootProduces +// If no specific Consumes then set to rootConsumes +func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { + if len(b.produces) == 0 { + b.produces = rootProduces + } + if len(b.consumes) == 0 { + b.consumes = rootConsumes + } +} + +// typeNameHandler sets the function that will convert types to strings in the parameter +// and model definitions. +func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder { + b.typeNameHandleFunc = handler + return b +} + +// Build creates a new Route using the specification details collected by the RouteBuilder +func (b *RouteBuilder) Build() Route { + pathExpr, err := newPathExpression(b.currentPath) + if err != nil { + log.Printf("Invalid path:%s because:%v", b.currentPath, err) + os.Exit(1) + } + if b.function == nil { + log.Printf("No function specified for route:" + b.currentPath) + os.Exit(1) + } + operationName := b.operation + if len(operationName) == 0 && b.function != nil { + // extract from definition + operationName = nameOfFunction(b.function) + } + route := Route{ + Method: b.httpMethod, + Path: concatPath(b.rootPath, b.currentPath), + Produces: b.produces, + Consumes: b.consumes, + Function: b.function, + Filters: b.filters, + If: b.conditions, + relativePath: b.currentPath, + pathExpr: pathExpr, + Doc: b.doc, + Notes: b.notes, + Operation: operationName, + ParameterDocs: b.parameters, + ResponseErrors: b.errorMap, + DefaultResponse: b.defaultResponse, + ReadSample: b.readSample, + WriteSamples: b.writeSamples, + Metadata: b.metadata, + Deprecated: b.deprecated, + contentEncodingEnabled: b.contentEncodingEnabled, + allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, + } + // set WriteSample if one specified + if len(b.writeSamples) == 1 { + route.WriteSample = b.writeSamples[0] + } + route.Extensions = b.extensions + route.postBuild() + return route +} + +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + + if TrimRightSlashEnabled { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } else { + return path.Join(rootPath, routePath) + } +} + +var anonymousFuncCount int32 + +// nameOfFunction returns the short name of the function f for documentation. +// It uses a runtime feature for debugging ; its value may change for later Go versions. 
+func nameOfFunction(f interface{}) string { + fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) + tokenized := strings.Split(fun.Name(), ".") + last := tokenized[len(tokenized)-1] + last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 + last = strings.TrimSuffix(last, ")-fm") // Go 1.5 + last = strings.TrimSuffix(last, "·fm") // < Go 1.5 + last = strings.TrimSuffix(last, "-fm") // Go 1.5 + if last == "func1" { // this could mean conflicts in API docs + val := atomic.AddInt32(&anonymousFuncCount, 1) + last = "func" + fmt.Sprintf("%d", val) + atomic.StoreInt32(&anonymousFuncCount, val) + } + return last +} diff --git a/vendor/github.com/emicklei/go-restful/v3/route_reader.go b/vendor/github.com/emicklei/go-restful/v3/route_reader.go new file mode 100644 index 000000000..c9f4ee75f --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/route_reader.go @@ -0,0 +1,66 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +type RouteReader interface { + Method() string + Consumes() []string + Path() string + Doc() string + Notes() string + Operation() string + ParameterDocs() []*Parameter + // Returns a copy + Metadata() map[string]interface{} + Deprecated() bool +} + +type routeAccessor struct { + route *Route +} + +func (r routeAccessor) Method() string { + return r.route.Method +} +func (r routeAccessor) Consumes() []string { + return r.route.Consumes[:] +} +func (r routeAccessor) Path() string { + return r.route.Path +} +func (r routeAccessor) Doc() string { + return r.route.Doc +} +func (r routeAccessor) Notes() string { + return r.route.Notes +} +func (r routeAccessor) Operation() string { + return r.route.Operation +} +func (r routeAccessor) ParameterDocs() []*Parameter { + return r.route.ParameterDocs[:] +} + +// Returns a copy +func (r routeAccessor) Metadata() map[string]interface{} { + return copyMap(r.route.Metadata) +} +func (r routeAccessor) Deprecated() bool { + return r.route.Deprecated +} + +// https://stackoverflow.com/questions/23057785/how-to-copy-a-map +func copyMap(m map[string]interface{}) map[string]interface{} { + cp := make(map[string]interface{}) + for k, v := range m { + vm, ok := v.(map[string]interface{}) + if ok { + cp[k] = copyMap(vm) + } else { + cp[k] = v + } + } + return cp +} diff --git a/vendor/github.com/emicklei/go-restful/v3/router.go b/vendor/github.com/emicklei/go-restful/v3/router.go new file mode 100644 index 000000000..19078af1c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/router.go @@ -0,0 +1,20 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import "net/http" + +// A RouteSelector finds the best matching Route given the input HTTP Request +// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the +// path parameters after the route has been selected. +type RouteSelector interface { + + // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. + // It returns a selected Route and its containing WebService or an error indicating + // a problem. 
+ SelectRoute( + webServices []*WebService, + httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) +} diff --git a/vendor/github.com/emicklei/go-restful/v3/service_error.go b/vendor/github.com/emicklei/go-restful/v3/service_error.go new file mode 100644 index 000000000..a41575469 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/service_error.go @@ -0,0 +1,32 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "fmt" + "net/http" +) + +// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. +type ServiceError struct { + Code int + Message string + Header http.Header +} + +// NewError returns a ServiceError using the code and reason +func NewError(code int, message string) ServiceError { + return ServiceError{Code: code, Message: message} +} + +// NewErrorWithHeader returns a ServiceError using the code, reason and header +func NewErrorWithHeader(code int, message string, header http.Header) ServiceError { + return ServiceError{Code: code, Message: message, Header: header} +} + +// Error returns a text representation of the service error +func (s ServiceError) Error() string { + return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) +} diff --git a/vendor/github.com/emicklei/go-restful/v3/web_service.go b/vendor/github.com/emicklei/go-restful/v3/web_service.go new file mode 100644 index 000000000..789c4df25 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/web_service.go @@ -0,0 +1,305 @@ +package restful + +import ( + "errors" + "os" + "reflect" + "sync" + + "github.com/emicklei/go-restful/v3/log" +) + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. +type WebService struct { + rootPath string + pathExpr *pathExpression // cached compilation of rootPath as RegExp + routes []Route + produces []string + consumes []string + pathParameters []*Parameter + filters []FilterFunction + documentation string + apiVersion string + + typeNameHandleFunc TypeNameHandleFunction + + dynamicRoutes bool + + // protects 'routes' if dynamic routes are enabled + routesLock sync.RWMutex +} + +func (w *WebService) SetDynamicRoutes(enable bool) { + w.dynamicRoutes = enable +} + +// TypeNameHandleFunction declares functions that can handle translating the name of a sample object +// into the restful documentation for the service. +type TypeNameHandleFunction func(sample interface{}) string + +// TypeNameHandler sets the function that will convert types to strings in the parameter +// and model definitions. If not set, the web service will invoke +// reflect.TypeOf(object).String(). +func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService { + w.typeNameHandleFunc = handler + return w +} + +// reflectTypeName is the default TypeNameHandleFunction and for a given object +// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via +// the reflection API. +func reflectTypeName(sample interface{}) string { + return reflect.TypeOf(sample).String() +} + +// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. 
+func (w *WebService) compilePathExpression() { + compiled, err := newPathExpression(w.rootPath) + if err != nil { + log.Printf("invalid path:%s because:%v", w.rootPath, err) + os.Exit(1) + } + w.pathExpr = compiled +} + +// ApiVersion sets the API version for documentation purposes. +func (w *WebService) ApiVersion(apiVersion string) *WebService { + w.apiVersion = apiVersion + return w +} + +// Version returns the API version for documentation purposes. +func (w *WebService) Version() string { return w.apiVersion } + +// Path specifies the root URL template path of the WebService. +// All Routes will be relative to this path. +func (w *WebService) Path(root string) *WebService { + w.rootPath = root + if len(w.rootPath) == 0 { + w.rootPath = "/" + } + w.compilePathExpression() + return w +} + +// Param adds a PathParameter to document parameters used in the root path. +func (w *WebService) Param(parameter *Parameter) *WebService { + if w.pathParameters == nil { + w.pathParameters = []*Parameter{} + } + w.pathParameters = append(w.pathParameters, parameter) + return w +} + +// PathParameter creates a new Parameter of kind Path for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) PathParameter(name, description string) *Parameter { + return PathParameter(name, description) +} + +// PathParameter creates a new Parameter of kind Path for documentation purposes. +// It is initialized as required with string as its DataType. +func PathParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} + p.bePath() + return p +} + +// QueryParameter creates a new Parameter of kind Query for documentation purposes. +// It is initialized as not required with string as its DataType. +func (w *WebService) QueryParameter(name, description string) *Parameter { + return QueryParameter(name, description) +} + +// QueryParameter creates a new Parameter of kind Query for documentation purposes. +// It is initialized as not required with string as its DataType. +func QueryParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} + p.beQuery() + return p +} + +// BodyParameter creates a new Parameter of kind Body for documentation purposes. +// It is initialized as required without a DataType. +func (w *WebService) BodyParameter(name, description string) *Parameter { + return BodyParameter(name, description) +} + +// BodyParameter creates a new Parameter of kind Body for documentation purposes. +// It is initialized as required without a DataType. +func BodyParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} + p.beBody() + return p +} + +// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. +// It is initialized as not required with string as its DataType. +func (w *WebService) HeaderParameter(name, description string) *Parameter { + return HeaderParameter(name, description) +} + +// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. +// It is initialized as not required with string as its DataType. 
+func HeaderParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beHeader() + return p +} + +// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) FormParameter(name, description string) *Parameter { + return FormParameter(name, description) +} + +// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. +// It is initialized as required with string as its DataType. +func FormParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beForm() + return p +} + +// MultiPartFormParameter creates a new Parameter of kind Form (using multipart/form-data) for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) MultiPartFormParameter(name, description string) *Parameter { + return MultiPartFormParameter(name, description) +} + +func MultiPartFormParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beMultiPartForm() + return p +} + +// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. +func (w *WebService) Route(builder *RouteBuilder) *WebService { + w.routesLock.Lock() + defer w.routesLock.Unlock() + builder.copyDefaults(w.produces, w.consumes) + w.routes = append(w.routes, builder.Build()) + return w +} + +// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' +func (w *WebService) RemoveRoute(path, method string) error { + if !w.dynamicRoutes { + return errors.New("dynamic routes are not enabled.") + } + w.routesLock.Lock() + defer w.routesLock.Unlock() + newRoutes := []Route{} + for _, route := range w.routes { + if route.Method == method && route.Path == path { + continue + } + newRoutes = append(newRoutes, route) + } + w.routes = newRoutes + return nil +} + +// Method creates a new RouteBuilder and initialize its http method +func (w *WebService) Method(httpMethod string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod) +} + +// Produces specifies that this WebService can produce one or more MIME types. +// Http requests must have one of these values set for the Accept header. +func (w *WebService) Produces(contentTypes ...string) *WebService { + w.produces = contentTypes + return w +} + +// Consumes specifies that this WebService can consume one or more MIME types. +// Http requests must have one of these values set for the Content-Type header. +func (w *WebService) Consumes(accepts ...string) *WebService { + w.consumes = accepts + return w +} + +// Routes returns the Routes associated with this WebService +func (w *WebService) Routes() []Route { + if !w.dynamicRoutes { + return w.routes + } + // Make a copy of the array to prevent concurrency problems + w.routesLock.RLock() + defer w.routesLock.RUnlock() + result := make([]Route, len(w.routes)) + for ix := range w.routes { + result[ix] = w.routes[ix] + } + return result +} + +// RootPath returns the RootPath associated with this WebService. 
Default "/" +func (w *WebService) RootPath() string { + return w.rootPath +} + +// PathParameters return the path parameter names for (shared among its Routes) +func (w *WebService) PathParameters() []*Parameter { + return w.pathParameters +} + +// Filter adds a filter function to the chain of filters applicable to all its Routes +func (w *WebService) Filter(filter FilterFunction) *WebService { + w.filters = append(w.filters, filter) + return w +} + +// Doc is used to set the documentation of this service. +func (w *WebService) Doc(plainText string) *WebService { + w.documentation = plainText + return w +} + +// Documentation returns it. +func (w *WebService) Documentation() string { + return w.documentation +} + +/* + Convenience methods +*/ + +// HEAD is a shortcut for .Method("HEAD").Path(subPath) +func (w *WebService) HEAD(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath) +} + +// GET is a shortcut for .Method("GET").Path(subPath) +func (w *WebService) GET(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath) +} + +// POST is a shortcut for .Method("POST").Path(subPath) +func (w *WebService) POST(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath) +} + +// PUT is a shortcut for .Method("PUT").Path(subPath) +func (w *WebService) PUT(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath) +} + +// PATCH is a shortcut for .Method("PATCH").Path(subPath) +func (w *WebService) PATCH(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath) +} + +// DELETE is a shortcut for .Method("DELETE").Path(subPath) +func (w *WebService) DELETE(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) +} + +// OPTIONS is a shortcut for .Method("OPTIONS").Path(subPath) +func (w *WebService) OPTIONS(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("OPTIONS").Path(subPath) +} diff --git a/vendor/github.com/emicklei/go-restful/v3/web_service_container.go b/vendor/github.com/emicklei/go-restful/v3/web_service_container.go new file mode 100644 index 000000000..c9d31b06c --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/web_service_container.go @@ -0,0 +1,39 @@ +package restful + +// Copyright 2013 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "net/http" +) + +// DefaultContainer is a restful.Container that uses http.DefaultServeMux +var DefaultContainer *Container + +func init() { + DefaultContainer = NewContainer() + DefaultContainer.ServeMux = http.DefaultServeMux +} + +// If set the true then panics will not be caught to return HTTP 500. +// In that case, Route functions are responsible for handling any error situation. +// Default value is false = recover from panics. This has performance implications. 
+// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) +var DoNotRecover = false + +// Add registers a new WebService add it to the DefaultContainer. +func Add(service *WebService) { + DefaultContainer.Add(service) +} + +// Filter appends a container FilterFunction from the DefaultContainer. +// These are called before dispatching a http.Request to a WebService. +func Filter(filter FilterFunction) { + DefaultContainer.Filter(filter) +} + +// RegisteredWebServices returns the collections of WebServices from the DefaultContainer +func RegisteredWebServices() []*WebService { + return DefaultContainer.RegisteredWebServices() +} diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 000000000..f1c181ec9 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 000000000..38cb9ae10 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. + +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. 
+ max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..c794b2b0c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 000000000..de0965e12 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. 
+ +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. +* If you propose a change or addition, try to give an example how the improved code could look like or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 000000000..eaa850492 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 000000000..af0a79507 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). + +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. + +## fxamacker/cbor + +[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) +[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) +[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. + +Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. + +
+<details><summary>Highlights</summary>

+ +__🚀  Speed__ + +Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__:jigsaw:  Usability__ + +API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +


+</details>
+
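+To make the "shrinks float64→32→16" point above concrete, here is a rough sketch (it assumes the `ShortestFloat` encoder option; values that cannot be represented losslessly keep their original size):
+
+```go
+// Sketch: shrink float64 values to smaller CBOR floats when lossless.
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	opts := cbor.EncOptions{ShortestFloat: cbor.ShortestFloat16}
+	em, err := opts.EncMode() // create once, reuse from any goroutine
+	if err != nil {
+		panic(err)
+	}
+	b, _ := em.Marshal(1.5)            // 1.5 fits in a float16
+	fmt.Println(hex.EncodeToString(b)) // f93e00: 3 bytes instead of 9
+}
+```
+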
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
+<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary>

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +


+</details>
+
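+As a sketch of those configurable limits (field names assume the documented `DecOptions` settings), a decoding mode with tighter caps than the defaults might be built like this:
+
+```go
+// Sketch: a decoding mode with tighter limits than the defaults.
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	opts := cbor.DecOptions{
+		MaxNestedLevels:  16,    // default is 32
+		MaxArrayElements: 10000, // default is 131072
+		MaxMapPairs:      10000, // default is 131072
+	}
+	dm, err := opts.DecMode() // immutable, safe for concurrent reuse
+	if err != nil {
+		panic(err)
+	}
+
+	var v interface{}
+	// Inputs that exceed the limits are rejected with an error instead of
+	// exhausting memory.
+	err = dm.Unmarshal([]byte{0x83, 0x01, 0x02, 0x03}, &v) // [1, 2, 3]
+	fmt.Println(v, err)
+}
+```
+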
+ +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
+<details><summary>Benchmark details</summary>

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


+</details>
+
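+As a quick illustration of the rejection described above (a sketch, not the benchmark itself), the same 10-byte input can be handed to the default-mode `Unmarshal`, which returns an error rather than allocating:
+
+```go
+// Sketch: default settings reject the malicious 10-byte benchmark input
+// (an array header claiming an enormous element count).
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	malicious := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}
+
+	var v []byte
+	err := cbor.Unmarshal(malicious, &v)
+	fmt.Println(err) // expect a non-nil error from the default limits
+}
+```
+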
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
+<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +
Example using several struct tags

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
Example using TagSet and TagOptions

+ +```go +// Use signedCWT struct defined in "Decoding CWT" example. + +// Create TagSet (safe for concurrency). +tags := cbor.NewTagSet() +// Register tag COSE_Sign1 18 with signedCWT type. +tags.Add( + cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired}, + reflect.TypeOf(signedCWT{}), + 18) + +// Create DecMode with immutable tags. +dm, _ := cbor.DecOptions{}.DecModeWithTags(tags) + +// Unmarshal to signedCWT with tag support. +var v signedCWT +if err := dm.Unmarshal(data, &v); err != nil { + return err +} + +// Create EncMode with immutable tags. +em, _ := cbor.EncOptions{}.EncModeWithTags(tags) + +// Marshal signedCWT with tag number using the EncMode created above. +if data, err := em.Marshal(v); err != nil { + return err +} +``` + +

+ +### Functions and Interfaces + +
Functions and interfaces at a glance

+ +Common functions with the same API as `encoding/json`: +- `Marshal`, `Unmarshal` +- `NewEncoder`, `(*Encoder).Encode` +- `NewDecoder`, `(*Decoder).Decode` + +NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes +because RFC 8949 treats a CBOR data item with remaining bytes as malformed. +- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes. + +Other useful functions: +- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. +- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes. +- `Wellformed` returns nil if the CBOR data item is well-formed. + +Interfaces identical or comparable to Go `encoding` packages include: +`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. + +The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding. + +

+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+ Duplicate Map Keys

+ +This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct. + +`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type. + +`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number. + +The APF suffix means "Allow Partial Fill", so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol. + +
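+A minimal sketch (the map bytes and error handling are illustrative, not from the upstream docs) of enforcing duplicate map key rejection:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// {1: "a", 1: "b"} -- a CBOR map containing the key 1 twice.
+	data := []byte{0xa2, 0x01, 0x61, 0x61, 0x01, 0x61, 0x62}
+
+	dm, _ := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+
+	var v map[int]string
+	err := dm.Unmarshal(data, &v)
+	fmt.Println(err) // expect a DupMapKeyError naming key 1 and its element index
+}
+```
+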

+ +
+ Tag Validity

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +

+ +## Limitations + +If any of these limitations prevent you from using this library, please open an issue along with a link to your project. + +* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`. +* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items. +* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation. + +## Fuzzing and Code Coverage + +__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release. + +__Coverage-guided fuzzing__ must pass billions of executions before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project. + +
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 000000000..9c05146d1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 000000000..823bff12c --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 000000000..ea0f39e24 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 000000000..ec038a49e 
--- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 000000000..85842ac73 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. 
+// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates an unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil. 
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+ // It decodes CBOR unsigned integer (major type 0) to: + // - uint64 + // It decodes CBOR negative integer (major type 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertNone IntDecMode = iota + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 + // - return UnmarshalTypeError if value > math.MaxInt64 + // Deprecated: IntDecConvertSigned should not be used. + // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. + IntDecConvertSigned + + // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - return UnmarshalTypeError if value doesn't fit into int64 + IntDecConvertSignedOrFail + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It makes CBOR integers (major type 0 and 1) decode to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertSignedOrBigInt + + maxIntDec +) + +func (idm IntDecMode) valid() bool { + return idm >= 0 && idm < maxIntDec +} + +// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2) +// as Go map key when decoding CBOR map key into an empty Go interface value. +// Specifically, this option applies when decoding CBOR map into +// - Go empty interface, or +// - Go map with empty interface as key type. +// The CBOR map key types handled by this option are +// - byte string +// - tagged byte string +// - nested tagged byte string +type MapKeyByteStringMode int + +const ( + // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key. + // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to + // ByteString which has underlying string type. + // This is the default setting. + MapKeyByteStringAllowed MapKeyByteStringMode = iota + + // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key. + // Attempting to decode CBOR byte string as map key into empty interface value + // returns a decoding error. + MapKeyByteStringForbidden + + maxMapKeyByteStringMode +) + +func (mkbsm MapKeyByteStringMode) valid() bool { + return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode +} + +// ExtraDecErrorCond specifies extra conditions that should be treated as errors. +type ExtraDecErrorCond uint + +// ExtraDecErrorNone indicates no extra error condition. +const ExtraDecErrorNone ExtraDecErrorCond = 0 + +const ( + // ExtraDecErrorUnknownField indicates error condition when destination + // Go struct doesn't have a field matching a CBOR map key. + ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota + + maxExtraDecError +) + +func (ec ExtraDecErrorCond) valid() bool { + return ec < maxExtraDecError +} + +// UTF8Mode option specifies if decoder should +// decode CBOR Text containing invalid UTF-8 string. +type UTF8Mode int + +const ( + // UTF8RejectInvalid rejects CBOR Text containing + // invalid UTF-8 string. + UTF8RejectInvalid UTF8Mode = iota + + // UTF8DecodeInvalid allows decoding CBOR Text containing + // invalid UTF-8 string. 
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
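Editor's aside (not part of the vendored file or of this patch): once the option fields above pass validation, decMode() assembles the immutable mode that all decoding goes through. A minimal, self-contained sketch of how calling code typically drives this API follows; it assumes the github.com/fxamacker/cbor/v2 import path, hand-encoded CBOR bytes, and arbitrarily chosen option values purely for illustration.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Build an immutable DecMode once; per the docs above it is safe for concurrent reuse.
	// The option values below are arbitrary illustrations, not recommendations.
	dm, err := cbor.DecOptions{
		DupMapKey: cbor.DupMapKeyEnforcedAPF,        // return DupMapKeyError on duplicate map keys
		IntDec:    cbor.IntDecConvertSignedOrBigInt, // integers decode to int64, or big.Int on overflow
	}.DecMode()
	if err != nil {
		panic(err)
	}

	// 0xa2 0x61 0x61 0x01 0x61 0x62 0x02 encodes the CBOR map {"a": 1, "b": 2}.
	var v interface{}
	if err := dm.Unmarshal([]byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x62, 0x02}, &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:1 b:2]
}

Because the returned DecMode is immutable and safe for concurrent use, callers normally construct it once at startup and reuse it for every Unmarshal, rather than building a mode per call.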
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow + // better test coverage + if err == nil { + err = d.value(v) + } + + // If either wellformed or value returned an error, do not return rest bytes + if err != nil { + return nil, err + } + + // Return the rest of the data slice (which might be len 0) + return d.data[d.off:], nil +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func (dm *decMode) Valid(data []byte) error { + return dm.Wellformed(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func (dm *decMode) Wellformed(data []byte) error { + d := decoder{data: data, dm: dm} + return d.wellformed(false, false) +} + +// NewDecoder returns a new decoder that reads from r using dm DecMode. +func (dm *decMode) NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r, d: decoder{dm: dm}} +} + +type decoder struct { + data []byte + off int // next read offset in data + dm *decMode + + // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags, + // if any. + // + // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding + // byte strings, the effective encoding comes from the tag nearest to the byte string being + // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be + // controlled by tag 22,and in the data item 23(h'42', 22([21(h'43')])]) the effective + // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21, + // respectively. + expectedLaterEncodingTags []uint64 +} + +// value decodes CBOR data item into the value pointed to by v. +// If CBOR data item fails to be decoded into v, +// error is returned and offset is moved to the next CBOR data item. +// Precondition: d.data contains at least one well-formed CBOR data item. +func (d *decoder) value(v interface{}) error { + // v can't be nil, non-pointer, or nil pointer value. + if v == nil { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"} + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"} + } else if rv.IsNil() { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"} + } + rv = rv.Elem() + return d.parseToValue(rv, getTypeInfo(rv.Type())) +} + +// parseToValue decodes CBOR data to value. It assumes data is well-formed, +// and does not perform bounds checking. +func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + + // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil. 
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
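
The struct-decoding loop above is driven entirely by exported DecOptions knobs (DupMapKey, ExtraReturnErrors, FieldNameByteString). As an editor-added, illustrative sketch only (the Claims type and its sample bytes are invented, not part of the vendored file), strict handling of duplicate and unknown map keys can be requested like this:

	package main

	import (
		"fmt"

		"github.com/fxamacker/cbor/v2"
	)

	// Claims decodes from a CBOR map with integer keys via the "keyasint" tag.
	type Claims struct {
		Issuer string `cbor:"1,keyasint"`
		Expiry uint64 `cbor:"4,keyasint"`
	}

	func main() {
		// DupMapKeyEnforcedAPF ("allow partial fill") returns DupMapKeyError on repeated keys;
		// ExtraDecErrorUnknownField returns UnknownFieldError for keys matching no struct field.
		dm, err := cbor.DecOptions{
			DupMapKey:         cbor.DupMapKeyEnforcedAPF,
			ExtraReturnErrors: cbor.ExtraDecErrorUnknownField,
		}.DecMode()
		if err != nil {
			panic(err)
		}

		// {1: "example", 4: 1234567891} as a definite-length CBOR map.
		data := []byte{
			0xa2,
			0x01, 0x67, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
			0x04, 0x1a, 0x49, 0x96, 0x02, 0xd3,
		}

		var c Claims
		if err := dm.Unmarshal(data, &c); err != nil {
			fmt.Println("decode error:", err)
			return
		}
		fmt.Printf("%+v\n", c) // {Issuer:example Expiry:1234567891}
	}
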
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
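
getHead above is reversing the standard CBOR head layout: one initial byte holding the major type and additional information, optionally followed by a 1-, 2-, 4- or 8-byte big-endian argument. A self-contained, editor-added sketch of the encode direction (encodeHead is an invented helper, not a function in this library):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// encodeHead builds the CBOR head that decoder.getHead reads back:
	// major type in the top 3 bits, additional information in the low 5 bits,
	// and a big-endian argument when the value does not fit in 5 bits.
	func encodeHead(majorType byte, val uint64) []byte {
		switch {
		case val <= 23:
			return []byte{majorType<<5 | byte(val)}
		case val <= 0xff:
			return []byte{majorType<<5 | 24, byte(val)}
		case val <= 0xffff:
			b := make([]byte, 3)
			b[0] = majorType<<5 | 25
			binary.BigEndian.PutUint16(b[1:], uint16(val))
			return b
		case val <= 0xffffffff:
			b := make([]byte, 5)
			b[0] = majorType<<5 | 26
			binary.BigEndian.PutUint32(b[1:], uint32(val))
			return b
		default:
			b := make([]byte, 9)
			b[0] = majorType<<5 | 27
			binary.BigEndian.PutUint64(b[1:], val)
			return b
		}
	}

	func main() {
		fmt.Printf("% x\n", encodeHead(0, 1000)) // 19 03 e8 -> unsigned integer 1000
		fmt.Printf("% x\n", encodeHead(4, 3))    // 83       -> array of length 3
	}
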
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 000000000..44afb8660 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" + "unicode/utf16" + "unicode/utf8" + + "github.com/x448/float16" +) + +// DiagMode is the main interface for CBOR diagnostic notation. +type DiagMode interface { + // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode. + Diagnose([]byte) (string, error) + + // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. + DiagnoseFirst([]byte) (string, []byte, error) + + // DiagOptions returns user specified options used to create this DiagMode. + DiagOptions() DiagOptions +} + +// ByteStringEncoding specifies the base encoding that byte strings are notated. +type ByteStringEncoding uint8 + +const ( + // ByteStringBase16Encoding encodes byte strings in base16, without padding. + ByteStringBase16Encoding ByteStringEncoding = iota + + // ByteStringBase32Encoding encodes byte strings in base32, without padding. + ByteStringBase32Encoding + + // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding. + ByteStringBase32HexEncoding + + // ByteStringBase64Encoding encodes byte strings in base64url, without padding. + ByteStringBase64Encoding + + maxByteStringEncoding +) + +func (bse ByteStringEncoding) valid() error { + if bse >= maxByteStringEncoding { + return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) + } + return nil +} + +// DiagOptions specifies Diag options. +type DiagOptions struct { + // ByteStringEncoding specifies the base encoding that byte strings are notated. + // Default is ByteStringBase16Encoding. + ByteStringEncoding ByteStringEncoding + + // ByteStringHexWhitespace specifies notating with whitespace in byte string + // when ByteStringEncoding is ByteStringBase16Encoding. + ByteStringHexWhitespace bool + + // ByteStringText specifies notating with text in byte string + // if it is a valid UTF-8 text. + ByteStringText bool + + // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string + // if it is a valid CBOR bytes. + ByteStringEmbeddedCBOR bool + + // CBORSequence specifies notating CBOR sequences. + // otherwise, it returns an error if there are more bytes after the first CBOR. + CBORSequence bool + + // FloatPrecisionIndicator specifies appending a suffix to indicate float precision. + // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators. + FloatPrecisionIndicator bool + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int +} + +// DiagMode returns a DiagMode with immutable options. 
+func (opts DiagOptions) DiagMode() (DiagMode, error) { + return opts.diagMode() +} + +func (opts DiagOptions) diagMode() (*diagMode, error) { + if err := opts.ByteStringEncoding.valid(); err != nil { + return nil, err + } + + decMode, err := DecOptions{ + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.decMode() + if err != nil { + return nil, err + } + + return &diagMode{ + byteStringEncoding: opts.ByteStringEncoding, + byteStringHexWhitespace: opts.ByteStringHexWhitespace, + byteStringText: opts.ByteStringText, + byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR, + cborSequence: opts.CBORSequence, + floatPrecisionIndicator: opts.FloatPrecisionIndicator, + decMode: decMode, + }, nil +} + +type diagMode struct { + byteStringEncoding ByteStringEncoding + byteStringHexWhitespace bool + byteStringText bool + byteStringEmbeddedCBOR bool + cborSequence bool + floatPrecisionIndicator bool + decMode *decMode +} + +// DiagOptions returns user specified options used to create this DiagMode. +func (dm *diagMode) DiagOptions() DiagOptions { + return DiagOptions{ + ByteStringEncoding: dm.byteStringEncoding, + ByteStringHexWhitespace: dm.byteStringHexWhitespace, + ByteStringText: dm.byteStringText, + ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR, + CBORSequence: dm.cborSequence, + FloatPrecisionIndicator: dm.floatPrecisionIndicator, + MaxNestedLevels: dm.decMode.maxNestedLevels, + MaxArrayElements: dm.decMode.maxArrayElements, + MaxMapPairs: dm.decMode.maxMapPairs, + } +} + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode. +func (dm *diagMode) Diagnose(data []byte) (string, error) { + return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence) +} + +// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. +func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return newDiagnose(data, dm.decMode, dm).diagFirst() +} + +var defaultDiagMode, _ = DiagOptions{}.diagMode() + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items +// using the default diagnostic mode. +// +// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation. +func Diagnose(data []byte) (string, error) { + return defaultDiagMode.Diagnose(data) +} + +// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. 
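
As an editor-added usage sketch (default options assumed; the byte literals below are tiny hand-built CBOR items, not part of the vendored file):

	package main

	import (
		"fmt"

		"github.com/fxamacker/cbor/v2"
	)

	func main() {
		// 0x83 0x01 0x02 0x03 is the CBOR array [1, 2, 3].
		edn, err := cbor.Diagnose([]byte{0x83, 0x01, 0x02, 0x03})
		if err != nil {
			panic(err)
		}
		fmt.Println(edn) // [1, 2, 3]

		// A DiagMode built from DiagOptions controls details such as byte string notation.
		dm, err := cbor.DiagOptions{ByteStringEncoding: cbor.ByteStringBase64Encoding}.DiagMode()
		if err != nil {
			panic(err)
		}
		edn, err = dm.Diagnose([]byte{0x43, 0x01, 0x02, 0x03}) // byte string h'010203'
		if err != nil {
			panic(err)
		}
		fmt.Println(edn) // expected: b64'AQID'
	}
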
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64: + di.w.WriteString("NaN") + return nil + case f64 > math.MaxFloat64: + di.w.WriteString("Infinity") + return nil + case f64 < -math.MaxFloat64: + di.w.WriteString("-Infinity") + return nil + } + } + // Use ES6 number to string conversion which should match most JSON generators. + // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585 + const bitSize = 64 + b := make([]byte, 0, 32) + if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) { + b = strconv.AppendFloat(b, f64, 'e', -1, bitSize) + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && string(b[n-4:n-1]) == "e-0" { + b = append(b[:n-2], b[n-1]) + } + } else { + b = strconv.AppendFloat(b, f64, 'f', -1, bitSize) + } + + // add decimal point and trailing zero if needed + if bytes.IndexByte(b, '.') < 0 { + if i := bytes.IndexByte(b, 'e'); i < 0 { + b = append(b, '.', '0') + } else { + b = append(b[:i+2], b[i:]...) + b[i] = '.' + b[i+1] = '0' + } + } + + di.w.WriteString(string(b)) + + if di.dm.floatPrecisionIndicator { + switch ai { + case additionalInformationAsFloat16: + di.w.WriteString("_1") + return nil + + case additionalInformationAsFloat32: + di.w.WriteString("_2") + return nil + + case additionalInformationAsFloat64: + di.w.WriteString("_3") + return nil + } + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go new file mode 100644 index 000000000..23f68b984 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/doc.go @@ -0,0 +1,129 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +/* +Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags, +Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding, +CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection. + +Encoding options allow "preferred serialization" by encoding integers and floats +to their smallest forms (e.g. float16) when values fit. + +Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller +and easier to use with structs. + +For example, "toarray" tag makes struct fields encode to CBOR array elements. And +"keyasint" makes a field encode to an element of CBOR map with specified int key. + +Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go + +# Basics + +The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start + +Function signatures identical to encoding/json include: + + Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode. + +Standard interfaces include: + + BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler. + +Custom encoding and decoding is possible by implementing standard interfaces for +user-defined Go types. + +Codec functions are available at package-level (using defaults options) or by +creating modes from options at runtime. + +"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode). + +EncMode and DecMode interfaces are created from EncOptions or DecOptions structs. + + em, err := cbor.EncOptions{...}.EncMode() + em, err := cbor.CanonicalEncOptions().EncMode() + em, err := cbor.CTAP2EncOptions().EncMode() + +Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of +modes won't accidentally change at runtime after they're created. 
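
For symmetry with the encoding-mode examples that follow, here is an editor-added sketch of building and reusing a decoding mode (MaxNestedLevels is just one example option; the CBOR bytes are hand-built for illustration):

	package main

	import (
		"fmt"

		"github.com/fxamacker/cbor/v2"
	)

	func main() {
		// Create a reusable DecMode with immutable options, safe for concurrent use.
		dm, err := cbor.DecOptions{MaxNestedLevels: 16}.DecMode()
		if err != nil {
			panic(err)
		}

		// Reuse it like encoding/json: 0x18 0x2a is the CBOR unsigned integer 42.
		var v interface{}
		if err := dm.Unmarshal([]byte{0x18, 0x2a}, &v); err != nil {
			panic(err)
		}
		fmt.Printf("%T %v\n", v, v) // uint64 42
	}
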
+
+Modes are intended to be reused and are safe for concurrent use.
+
+EncMode and DecMode Interfaces
+
+	// EncMode interface uses immutable options and is safe for concurrent use.
+	type EncMode interface {
+		Marshal(v interface{}) ([]byte, error)
+		NewEncoder(w io.Writer) *Encoder
+		EncOptions() EncOptions // returns copy of options
+	}
+
+	// DecMode interface uses immutable options and is safe for concurrent use.
+	type DecMode interface {
+		Unmarshal(data []byte, v interface{}) error
+		NewDecoder(r io.Reader) *Decoder
+		DecOptions() DecOptions // returns copy of options
+	}
+
+Using Default Encoding Mode
+
+	b, err := cbor.Marshal(v)
+
+	encoder := cbor.NewEncoder(w)
+	err = encoder.Encode(v)
+
+Using Default Decoding Mode
+
+	err := cbor.Unmarshal(b, &v)
+
+	decoder := cbor.NewDecoder(r)
+	err = decoder.Decode(&v)
+
+Creating and Using Encoding Modes
+
+	// Create EncOptions using either struct literal or a function.
+	opts := cbor.CanonicalEncOptions()
+
+	// If needed, modify encoding options
+	opts.Time = cbor.TimeUnix
+
+	// Create reusable EncMode interface with immutable options, safe for concurrent use.
+	em, err := opts.EncMode()
+
+	// Use EncMode like encoding/json, with same function signatures.
+	b, err := em.Marshal(v)
+	// or
+	encoder := em.NewEncoder(w)
+	err := encoder.Encode(v)
+
+	// NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
+	// specified during creation of em (encoding mode).
+
+# CBOR Options
+
+Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options
+
+Encoding Options: https://github.com/fxamacker/cbor#encoding-options
+
+Decoding Options: https://github.com/fxamacker/cbor#decoding-options
+
+# Struct Tags
+
+Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
+If both struct tags are specified then `cbor` is used.
+
+Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
+very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
+
+For example, "toarray" makes struct fields encode to array elements. And "keyasint"
+makes struct fields encode to elements of CBOR map with int keys.
+
+https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
+
+Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
+
+# Tests and Fuzzing
+
+Over 375 tests are included in this package. Cover-guided fuzzing is handled by
+a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
+*/
+package cbor
diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go
new file mode 100644
index 000000000..6508e291d
--- /dev/null
+++ b/vendor/github.com/fxamacker/cbor/v2/encode.go
@@ -0,0 +1,1989 @@
+// Copyright (c) Faye Amacker. All rights reserved.
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+package cbor
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"math/big"
+	"math/rand"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/x448/float16"
+)
+
+// Marshal returns the CBOR encoding of v using default encoding options.
+// See EncOptions for encoding options.
+//
+// Marshal uses the following encoding rules:
+//
+// If value implements the Marshaler interface, Marshal calls its
+// MarshalCBOR method.
+//
+// If value implements encoding.BinaryMarshaler, Marshal calls its
+// MarshalBinary method and encodes it as a CBOR byte string.
+// +// Boolean values encode as CBOR booleans (type 7). +// +// Positive integer values encode as CBOR positive integers (type 0). +// +// Negative integer values encode as CBOR negative integers (type 1). +// +// Floating point values encode as CBOR floating points (type 7). +// +// String values encode as CBOR text strings (type 3). +// +// []byte values encode as CBOR byte strings (type 2). +// +// Array and slice values encode as CBOR arrays (type 4). +// +// Map values encode as CBOR maps (type 5). +// +// Struct values encode as CBOR maps (type 5). Each exported struct field +// becomes a pair with field name encoded as CBOR text string (type 3) and +// field value encoded based on its type. See struct tag option "keyasint" +// to encode field name as CBOR integer (type 0 and 1). Also see struct +// tag option "toarray" for special field "_" to encode struct values as +// CBOR array (type 4). +// +// Marshal supports format string stored under the "cbor" key in the struct +// field's tag. CBOR format string can specify the name of the field, +// "omitempty" and "keyasint" options, and special case "-" for field omission. +// If "cbor" key is absent, Marshal uses "json" key. +// +// Struct field name is treated as integer if it has "keyasint" option in +// its format string. The format string must specify an integer as its +// field name. +// +// Special struct field "_" is used to specify struct level options, such as +// "toarray". "toarray" option enables Go struct to be encoded as CBOR array. +// "omitempty" is disabled by "toarray" to ensure that the same number +// of elements are encoded every time. +// +// Anonymous struct fields are marshaled as if their exported fields +// were fields in the outer struct. Marshal follows the same struct fields +// visibility rules used by JSON encoding package. +// +// time.Time values encode as text strings specified in RFC3339 or numerical +// representation of seconds since January 1, 1970 UTC depending on +// EncOptions.Time setting. Also See EncOptions.TimeTag to encode +// time.Time as CBOR tag with tag number 0 or 1. +// +// big.Int values encode as CBOR integers (type 0 and 1) if values fit. +// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See +// EncOptions.BigIntConvert to always encode big.Int values as CBOR +// bignums. +// +// Pointer values encode as the value pointed to. +// +// Interface values encode as the value stored in the interface. +// +// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7). +// +// Values of other types cannot be encoded in CBOR. Attempting +// to encode such a value causes Marshal to return an UnsupportedTypeError. +func Marshal(v interface{}) ([]byte, error) { + return defaultEncMode.Marshal(v) +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses default encoding options. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + return defaultEncMode.MarshalToBuffer(v, buf) +} + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid CBOR. +type Marshaler interface { + MarshalCBOR() ([]byte, error) +} + +// MarshalerError represents error from checking encoded CBOR data item +// returned from MarshalCBOR for well-formedness and some very limited tag validation. 
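
To make the struct tag rules above concrete, an editor-added sketch (the Record and Token types are invented for illustration; byte comments assume default EncOptions):

	package main

	import (
		"fmt"

		"github.com/fxamacker/cbor/v2"
	)

	// Record encodes as a CBOR array because of the special "_" field with "toarray".
	type Record struct {
		_     struct{} `cbor:",toarray"`
		ID    uint64
		Label string
	}

	// Token encodes as a CBOR map with integer keys because of "keyasint".
	type Token struct {
		Issuer string `cbor:"1,keyasint"`
		Expiry uint64 `cbor:"4,keyasint,omitempty"`
	}

	func main() {
		b, err := cbor.Marshal(Record{ID: 7, Label: "a"})
		if err != nil {
			panic(err)
		}
		fmt.Printf("% x\n", b) // 82 07 61 61 -> [7, "a"]

		b, err = cbor.Marshal(Token{Issuer: "x"})
		if err != nil {
			panic(err)
		}
		fmt.Printf("% x\n", b) // a1 01 61 78 -> {1: "x"}, zero-valued Expiry omitted
	}
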
+type MarshalerError struct { + typ reflect.Type + err error +} + +func (e *MarshalerError) Error() string { + return "cbor: error calling MarshalCBOR for type " + + e.typ.String() + + ": " + e.err.Error() +} + +func (e *MarshalerError) Unwrap() error { + return e.err +} + +// UnsupportedTypeError is returned by Marshal when attempting to encode value +// of an unsupported type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "cbor: unsupported type: " + e.Type.String() +} + +// UnsupportedValueError is returned by Marshal when attempting to encode an +// unsupported value. +type UnsupportedValueError struct { + msg string +} + +func (e *UnsupportedValueError) Error() string { + return "cbor: unsupported value: " + e.msg +} + +// SortMode identifies supported sorting order. +type SortMode int + +const ( + // SortNone encodes map pairs and struct fields in an arbitrary order. + SortNone SortMode = 0 + + // SortLengthFirst causes map keys or struct fields to be sorted such that: + // - If two keys have different lengths, the shorter one sorts earlier; + // - If two keys have the same length, the one with the lower value in + // (byte-wise) lexical order sorts earlier. + // It is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortLengthFirst SortMode = 1 + + // SortBytewiseLexical causes map keys or struct fields to be sorted in the + // bytewise lexicographic order of their deterministic CBOR encodings. + // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding" + // in RFC 7049bis. + SortBytewiseLexical SortMode = 2 + + // SortShuffle encodes map pairs and struct fields in a shuffled + // order. This mode does not guarantee an unbiased permutation, but it + // does guarantee that the runtime of the shuffle algorithm used will be + // constant. + SortFastShuffle SortMode = 3 + + // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortCanonical SortMode = SortLengthFirst + + // SortCTAP2 is used in "CTAP2 Canonical CBOR". + SortCTAP2 SortMode = SortBytewiseLexical + + // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis. + SortCoreDeterministic SortMode = SortBytewiseLexical + + maxSortMode SortMode = 4 +) + +func (sm SortMode) valid() bool { + return sm >= 0 && sm < maxSortMode +} + +// StringMode specifies how to encode Go string values. +type StringMode int + +const ( + // StringToTextString encodes Go string to CBOR text string (major type 3). + StringToTextString StringMode = iota + + // StringToByteString encodes Go string to CBOR byte string (major type 2). + StringToByteString +) + +func (st StringMode) cborType() (cborType, error) { + switch st { + case StringToTextString: + return cborTypeTextString, nil + + case StringToByteString: + return cborTypeByteString, nil + } + return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st))) +} + +// ShortestFloatMode specifies which floating-point format should +// be used as the shortest possible format for CBOR encoding. +// It is not used for encoding Infinity and NaN values. +type ShortestFloatMode int + +const ( + // ShortestFloatNone makes float values encode without any conversion. + // This is the default for ShortestFloatMode in v1. + // E.g. a float32 in Go will encode to CBOR float32. And + // a float64 in Go will encode to CBOR float64. + ShortestFloatNone ShortestFloatMode = iota + + // ShortestFloat16 specifies float16 as the shortest form that preserves value. + // E.g. 
if float64 can convert to float32 while preserving value, then + // encoding will also try to convert float32 to float16. So a float64 might + // encode as CBOR float64, float32 or float16 depending on the value. + ShortestFloat16 + + maxShortestFloat +) + +func (sfm ShortestFloatMode) valid() bool { + return sfm >= 0 && sfm < maxShortestFloat +} + +// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type NaNConvertMode int + +const ( + // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00). + NaNConvert7e00 NaNConvertMode = iota + + // NaNConvertNone never modifies or converts NaN to other representations + // (float64 NaN stays float64, etc. even if it can use float16 without losing + // any bits). + NaNConvertNone + + // NaNConvertPreserveSignal converts NaN to the smallest form that preserves + // value (quiet bit + payload) as described in RFC 7049bis Draft 12. + NaNConvertPreserveSignal + + // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves + // NaN payload. + NaNConvertQuiet + + // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value. + NaNConvertReject + + maxNaNConvert +) + +func (ncm NaNConvertMode) valid() bool { + return ncm >= 0 && ncm < maxNaNConvert +} + +// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type InfConvertMode int + +const ( + // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16). + InfConvertFloat16 InfConvertMode = iota + + // InfConvertNone never converts (used by CTAP2 Canonical CBOR). + InfConvertNone + + // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value. + InfConvertReject + + maxInfConvert +) + +func (icm InfConvertMode) valid() bool { + return icm >= 0 && icm < maxInfConvert +} + +// TimeMode specifies how to encode time.Time values. +type TimeMode int + +const ( + // TimeUnix causes time.Time to be encoded as epoch time in integer with second precision. + TimeUnix TimeMode = iota + + // TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision. + TimeUnixMicro + + // TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds, + // otherwise float-point rounded to microsecond precision. + TimeUnixDynamic + + // TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision. + TimeRFC3339 + + // TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision. + TimeRFC3339Nano + + maxTimeMode +) + +func (tm TimeMode) valid() bool { + return tm >= 0 && tm < maxTimeMode +} + +// BigIntConvertMode specifies how to encode big.Int values. +type BigIntConvertMode int + +const ( + // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits. + // E.g. if big.Int value can be converted to CBOR integer while preserving + // value, encoder will encode it to CBOR integer (major type 0 or 1). + BigIntConvertShortest BigIntConvertMode = iota + + // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without + // converting it to another CBOR type. + BigIntConvertNone + + // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int. 
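
A short, editor-added illustration of the Time modes described above (EncTagRequired wraps the value in tag 0 or 1; the expected byte comments assume this exact timestamp):

	package main

	import (
		"fmt"
		"time"

		"github.com/fxamacker/cbor/v2"
	)

	func main() {
		t := time.Unix(1700000000, 0).UTC() // 2023-11-14T22:13:20Z

		// TimeUnix: epoch seconds as a CBOR integer.
		em, err := cbor.EncOptions{Time: cbor.TimeUnix}.EncMode()
		if err != nil {
			panic(err)
		}
		b, _ := em.Marshal(t)
		fmt.Printf("% x\n", b) // 1a 65 53 f1 00 (unsigned integer 1700000000)

		// TimeRFC3339 with TimeTag: RFC 3339 text string wrapped in tag 0.
		em, err = cbor.EncOptions{Time: cbor.TimeRFC3339, TimeTag: cbor.EncTagRequired}.EncMode()
		if err != nil {
			panic(err)
		}
		b, _ = em.Marshal(t)
		fmt.Printf("% x\n", b) // c0 74 ... ("2023-11-14T22:13:20Z" inside tag 0)
	}
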
+	BigIntConvertReject
+
+	maxBigIntConvert
+)
+
+func (bim BigIntConvertMode) valid() bool {
+	return bim >= 0 && bim < maxBigIntConvert
+}
+
+// NilContainersMode specifies how to encode nil slices and maps.
+type NilContainersMode int
+
+const (
+	// NilContainerAsNull encodes nil slices and maps as CBOR null.
+	// This is the default.
+	NilContainerAsNull NilContainersMode = iota
+
+	// NilContainerAsEmpty encodes nil slices and maps as
+	// empty container (CBOR bytestring, array, or map).
+	NilContainerAsEmpty
+
+	maxNilContainersMode
+)
+
+func (m NilContainersMode) valid() bool {
+	return m >= 0 && m < maxNilContainersMode
+}
+
+// OmitEmptyMode specifies how to encode struct fields with omitempty tag.
+// The default behavior omits if field value would encode as empty CBOR value.
+type OmitEmptyMode int
+
+const (
+	// OmitEmptyCBORValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field would be encoded as an empty
+	// CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string,
+	// empty array, or empty map.
+	OmitEmptyCBORValue OmitEmptyMode = iota
+
+	// OmitEmptyGoValue specifies that struct fields tagged with "omitempty"
+	// should be omitted from encoding if the field has an empty Go value,
+	// defined as false, 0, 0.0, a nil pointer, a nil interface value, and
+	// any empty array, slice, map, or string.
+	// This behavior is the same as the current (aka v1) encoding/json package
+	// included in Go.
+	OmitEmptyGoValue
+
+	maxOmitEmptyMode
+)
+
+func (om OmitEmptyMode) valid() bool {
+	return om >= 0 && om < maxOmitEmptyMode
+}
+
+// FieldNameMode specifies the CBOR type to use when encoding struct field names.
+type FieldNameMode int
+
+const (
+	// FieldNameToTextString encodes struct field names to CBOR text string (major type 3).
+	FieldNameToTextString FieldNameMode = iota
+
+	// FieldNameToByteString encodes struct field names to CBOR byte string (major type 2).
+	FieldNameToByteString
+
+	maxFieldNameMode
+)
+
+func (fnm FieldNameMode) valid() bool {
+	return fnm >= 0 && fnm < maxFieldNameMode
+}
+
+// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23)
+// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will
+// always encode unmodified bytes from the byte slice and just wrap it within
+// CBOR tag 21, 22, or 23 if specified.
+// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2.
+type ByteSliceLaterFormatMode int
+
+const (
+	// ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// without adding CBOR tag 21, 22, or 23.
+	ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota
+
+	// ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase64URL
+
+	// ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2).
+	ByteSliceLaterFormatBase64
+
+	// ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2)
+	// inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2).
+ ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will + // always encode unmodified bytes from the byte slice and just wrap it within + // CBOR tag 21, 22, or 23 if specified. + // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. 
+ ByteSliceLaterFormat ByteSliceLaterFormatMode + + // ByteArray specifies how to encode byte arrays. + ByteArray ByteArrayMode + + // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler. + BinaryMarshaler BinaryMarshalerMode +} + +// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding, +// defined in RFC 7049 Section 3.9 with the following rules: +// +// 1. "Integers must be as small as possible." +// 2. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 3. The keys in every map must be sorted in length-first sorting order. +// See SortLengthFirst for details. +// 4. "Indefinite-length items must be made into definite-length items." +// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might +// need to be added. One example rule might be to have all floats start as a 64-bit +// float, then do a test conversion to a 32-bit float; if the result is the same numeric +// value, use the shorter value and repeat the process with a test conversion to a +// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity +// as well.) Also, there are many representations for NaN. If NaN is an allowed value, +// it must always be represented as 0xf97e00." +func CanonicalEncOptions() EncOptions { + return EncOptions{ + Sort: SortCanonical, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding, +// defined in CTAP specification, with the following rules: +// +// 1. "Integers must be encoded as small as possible." +// 2. "The representations of any floating-point values are not changed." +// 3. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 4. "Indefinite-length items must be made into definite-length items."" +// 5. The keys in every map must be sorted in bytewise lexicographic order. +// See SortBytewiseLexical for details. +// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present." +func CTAP2EncOptions() EncOptions { + return EncOptions{ + Sort: SortCTAP2, + ShortestFloat: ShortestFloatNone, + NaNConvert: NaNConvertNone, + InfConvert: InfConvertNone, + IndefLength: IndefLengthForbidden, + TagsMd: TagsForbidden, + } +} + +// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "Preferred serialization MUST be used. In particular, this means that arguments +// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST +// be as short as possible" +// "Floating point values also MUST use the shortest form that preserves the value" +// 2. "Indefinite-length items MUST NOT appear." +// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of +// their deterministic encodings." +func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. 
"it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
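For orientation while reviewing this vendored API: every EncOptions value, including the presets above, is compiled by encMode() into an immutable mode that is safe for concurrent reuse. A minimal usage sketch follows; the sample map and the expected hex comment are illustrative and not part of the vendored code.

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        // Build the mode once; it is immutable and safe for concurrent use.
        em, err := cbor.CoreDetEncOptions().EncMode()
        if err != nil {
            panic(err)
        }

        // Core deterministic mode sorts map keys bytewise, so the output is stable.
        b, err := em.Marshal(map[string]int{"b": 2, "a": 1})
        if err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", b) // a2616101616202: 2-entry map, keys "a" then "b"
    }

The other presets above (CanonicalEncOptions, CTAP2EncOptions, PreferredUnsortedEncOptions) slot into the same EncOptions-to-EncMode pattern.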
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
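MarshalToBuffer, declared just above, skips the internal buffer pool in favor of a caller-owned buffer. A short sketch of that variant, assuming the same import path as the previous example; per the note in the code, the buffer may hold partially encoded data when an error is returned.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        em, err := cbor.EncOptions{}.UserBufferEncMode()
        if err != nil {
            panic(err)
        }

        var buf bytes.Buffer
        // Encode into the caller-owned buffer instead of the pooled one.
        if err := em.MarshalToBuffer([]int{1, 2, 3}, &buf); err != nil {
            panic(err) // buf may already contain partial output here
        }
        fmt.Printf("%x\n", buf.Bytes()) // 83010203: definite-length array [1, 2, 3]
    }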
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 000000000..8b4b4bbc5 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 000000000..31c39336d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 000000000..de175cee4 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
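The SimpleValue type added above round-trips through the one-byte and two-byte forms its MarshalCBOR documents. A small sketch using the package-level Marshal and Unmarshal helpers (defined elsewhere in this package); the value 16 is an arbitrary unassigned simple value chosen purely for illustration.

    package main

    import (
        "fmt"

        "github.com/fxamacker/cbor/v2"
    )

    func main() {
        // Simple value 16 is unassigned by IANA but well-formed; it encodes in one byte.
        b, err := cbor.Marshal(cbor.SimpleValue(16))
        if err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", b) // f0: major type 7, additional information 16

        var sv cbor.SimpleValue
        if err := cbor.Unmarshal(b, &sv); err != nil {
            panic(err)
        }
        fmt.Println(sv) // 16
    }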
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error { + if sv == nil { + return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer") + } + + d := decoder{data: data, dm: defaultDecMode} + + typ, ai, val := d.getHead() + + if typ != cborTypePrimitives { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"} + } + if ai > additionalInformationWith1ByteArgument { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"} + } + + // It is safe to cast val to uint8 here because + // - data is already verified to be well-formed CBOR simple value and + // - val is <= math.MaxUint8. + *sv = SimpleValue(val) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go new file mode 100644 index 000000000..507ab6c18 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -0,0 +1,277 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "errors" + "io" + "reflect" +) + +// Decoder reads and decodes CBOR values from io.Reader. +type Decoder struct { + r io.Reader + d decoder + buf []byte + off int // next read offset in buf + bytesRead int +} + +// NewDecoder returns a new decoder that reads and decodes from r using +// the default decoding options. +func NewDecoder(r io.Reader) *Decoder { + return defaultDecMode.NewDecoder(r) +} + +// Decode reads CBOR value and decodes it into the value pointed to by v. +func (dec *Decoder) Decode(v interface{}) error { + _, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.d.reset(dec.buf[dec.off:]) + err = dec.d.value(v) + + // Increment dec.off even if decoding err is not nil because + // dec.d.off points to the next CBOR data item if current + // CBOR data item is valid but failed to be decoded into v. + // This allows next CBOR data item to be decoded in next + // call to this function. + dec.off += dec.d.off + dec.bytesRead += dec.d.off + + return err +} + +// Skip skips to the next CBOR data item (if there is any), +// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc. +func (dec *Decoder) Skip() error { + n, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.off += n + dec.bytesRead += n + return nil +} + +// NumBytesRead returns the number of bytes read. +func (dec *Decoder) NumBytesRead() int { + return dec.bytesRead +} + +// Buffered returns a reader for data remaining in Decoder's buffer. +// Returned reader is valid until the next call to Decode or Skip. +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.buf[dec.off:]) +} + +// readNext() reads next CBOR data item from Reader to buffer. +// It returns the size of next CBOR data item. +// It also returns validation error or read error if any. +func (dec *Decoder) readNext() (int, error) { + var readErr error + var validErr error + + for { + // Process any unread data in dec.buf. + if dec.off < len(dec.buf) { + dec.d.reset(dec.buf[dec.off:]) + off := dec.off // Save offset before data validation + validErr = dec.d.wellformed(true, false) + dec.off = off // Restore offset + + if validErr == nil { + return dec.d.off, nil + } + + if validErr != io.ErrUnexpectedEOF { + return 0, validErr + } + + // Process last read error on io.ErrUnexpectedEOF. 
+ if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteArray() error { + return enc.startIndefinite(cborTypeArray) +} + +// StartIndefiniteMap starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the map +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteMap() error { + return enc.startIndefinite(cborTypeMap) +} + +// EndIndefinite closes last opened indefinite length value. +func (enc *Encoder) EndIndefinite() error { + if len(enc.indefTypes) == 0 { + return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") + } + _, err := enc.w.Write([]byte{cborBreakFlag}) + if err == nil { + enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1] + } + return err +} + +var cborIndefHeader = map[cborType][]byte{ + cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, + cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, + cborTypeArray: {cborArrayWithIndefiniteLengthHead}, + cborTypeMap: {cborMapWithIndefiniteLengthHead}, +} + +func (enc *Encoder) startIndefinite(typ cborType) error { + if enc.em.indefLength == IndefLengthForbidden { + return &IndefiniteLengthError{typ} + } + _, err := enc.w.Write(cborIndefHeader[typ]) + if err == nil { + enc.indefTypes = append(enc.indefTypes, typ) + } + return err +} + +// RawMessage is a raw encoded CBOR value. +type RawMessage []byte + +// MarshalCBOR returns m or CBOR nil if m is nil. +func (m RawMessage) MarshalCBOR() ([]byte, error) { + if len(m) == 0 { + return cborNil, nil + } + return m, nil +} + +// UnmarshalCBOR creates a copy of data and saves to *m. +func (m *RawMessage) UnmarshalCBOR(data []byte) error { + if m == nil { + return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer") + } + *m = append((*m)[0:0], data...) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go new file mode 100644 index 000000000..81228acf0 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -0,0 +1,260 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "reflect" + "sort" + "strings" +) + +type field struct { + name string + nameAsInt int64 // used to decoder to match field name with CBOR int + cborName []byte + cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 + idx []int + typ reflect.Type + ef encodeFunc + ief isEmptyFunc + typInfo *typeInfo // used to decoder to reuse type info + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + keyAsInt bool // used to encode/decode field name as int +} + +type fields []*field + +// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. 
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 000000000..5c4d2b7a4 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 000000000..b40793b95 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801df..0cffafa7b 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ad825f5f0..7c7f0c69c 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,8 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging without becoming coupled to a particular logging implementation. This is not @@ -73,6 +75,30 @@ received: If the Go standard library had defined an interface for logging, this project probably would not be needed. Alas, here we are. 
+When the Go developers started developing such an interface with +[slog](https://github.com/golang/go/issues/56345), they adopted some of the +logr design but also left out some parts and changed others: + +| Feature | logr | slog | +|---------|------|------| +| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | +| Low-level API | `LogSink` | `Handler` | +| Stack unwinding | done by `LogSink` | done by `Logger` | +| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | +| Generating a value for logging on demand | `Marshaler` | `LogValuer` | +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. + ### Inspiration Before you consider this package, please read [this blog post by the @@ -105,14 +131,115 @@ with higher verbosity means more (and less important) logs will be generated. There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) + +## slog interoperability + +Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. +As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level +slog API. 
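A minimal sketch of the round trip described above, assuming Go 1.21 or newer and a logr release that exports `FromSlogHandler` and `ToSlogHandler` (the handler choice and key/value names here are illustrative only):

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Use a slog.Handler as the backend for the logr API.
	handler := slog.NewTextHandler(os.Stderr, nil)
	logrLogger := logr.FromSlogHandler(handler)
	logrLogger.Info("setting target", "value", 42)

	// Go the other way: expose an existing logr.Logger through the slog API.
	// slog.New wraps the converted handler in the high-level slog.Logger.
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Info("target set", "value", 42)
}
```

Both loggers end up writing through the same underlying handler, so key/value pairs pass through without re-encoding; only the verbosity mapping differs between the two APIs, as detailed in the sections below.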
+ +### Using a `logr.LogSink` as backend for slog + +Ideally, a logr sink implementation should support both logr and slog by +implementing both the normal logr interface(s) and `SlogSink`. Because +of a conflict in the parameters of the common `Enabled` method, it is [not +possible to implement both slog.Handler and logr.Sink in the same +type](https://github.com/golang/go/issues/59110). + +If both are supported, log calls can go from the high-level APIs to the backend +without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can +convert back and forth without adding additional wrappers, with one exception: +when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then +`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future +log calls. + +Such an implementation should also support values that implement specific +interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, +`slog.GroupValue`). logr does not convert those. + +Not supporting slog has several drawbacks: +- Recording source code locations works correctly if the handler gets called + through `slog.Logger`, but may be wrong in other cases. That's because a + `logr.Sink` does its own stack unwinding instead of using the program counter + provided by the high-level API. +- slog levels <= 0 can be mapped to logr levels by negating the level without a + loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as + used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink + because logr does not support "more important than info" levels. +- The slog group concept is supported by prefixing each key in a key/value + pair with the group names, separated by a dot. For structured output like + JSON it would be better to group the key/value pairs inside an object. +- Special slog values and interfaces don't work as expected. +- The overhead is likely to be higher. + +These drawbacks are severe enough that applications using a mixture of slog and +logr should switch to a different backend. + +### Using a `slog.Handler` as backend for logr + +Using a plain `slog.Handler` without support for logr works better than the +other direction: +- All logr verbosity levels can be mapped 1:1 to their corresponding slog level + by negating them. +- Stack unwinding is done by the `SlogSink` and the resulting program + counter is passed to the `slog.Handler`. +- Names added via `Logger.WithName` are gathered and recorded in an additional + attribute with `logger` as key and the names separated by slash as value. +- `Logger.Error` is turned into a log record with `slog.LevelError` as level + and an additional attribute with `err` as key, if an error was provided. + +The main drawback is that `logr.Marshaler` will not be supported. Types should +ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility +with logr implementations without slog support is not important, then +`slog.Valuer` is sufficient. + +### Context support for slog + +Storing a logger in a `context.Context` is not supported by +slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be +used to fill this gap. They store and retrieve a `slog.Logger` pointer +under the same context key that is also used by `NewContext` and +`FromContext` for `logr.Logger` value. + +When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will +automatically convert the `slog.Logger` to a +`logr.Logger`. 
`FromContextAsSlogLogger` does the same for the other direction. + +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. + +The downside is that switching back and forth needs more allocations. Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. + +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ @@ -237,7 +364,9 @@ Otherwise, you can start out with `0` as "you always want to see this", Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) +info-type logs). For reference, slog pre-defines -4 for debug logs +(corresponds to 4 in logr), which matches what is +[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). #### How do I choose my keys? diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 000000000..1ca756fc7 --- /dev/null +++ b/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 000000000..de8bcc3ad --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 000000000..f012f9a18 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 000000000..065ef0b82 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. 
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. +func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 9d92a38f1..99fe8be93 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. -type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 44cd398c9..b4428e105 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -21,7 +21,7 @@ limitations under the License. // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // -// Usage +// # Usage // // Logging is done using a Logger instance. Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main @@ -30,22 +30,28 @@ limitations under the License. // "structured logging". // // With Go's standard log package, we might write: -// log.Printf("setting target value %s", targetValue) +// +// log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: -// logger.Info("setting target", "value", targetValue) +// +// logger.Info("setting target", "value", targetValue) // // Errors are much the same. 
Instead of: -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: -// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional -// information (such as stack traces) on calls to Error(). +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. // -// Verbosity +// # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. @@ -53,22 +59,25 @@ limitations under the License. // Log-lines with V-levels that are not enabled (as per the LogSink) will not // be written. Level V(0) is the default, and logger.V(0).Info() has the same // meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. // // Where we might have written: -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // // We can write: -// logger.V(2).Info("an unusual thing happened") // -// Logger Names +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. For example, you might want to add // a subsystem name: // -// logger.WithName("compactor").Info("started", "time", time.Now()) +// logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() @@ -79,25 +88,27 @@ limitations under the License. // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // -// Saved Values +// # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // // With logr we'd write: -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) // -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// Best Practices +// # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. 
There are, however, some @@ -112,15 +123,24 @@ limitations under the License. // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // -// Key Naming Conventions +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// The zero logger (= Logger{}) is identical to Discard() and discards all log +// entries. Code that receives a Logger by value can simply call it, the methods +// will never crash. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// * use lower case for simple keys and lowerCamelCase for more complex ones +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -129,51 +149,54 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// * "caller": the calling information (file/line) of a particular log line -// * "error": the underlying error value in the `Error` method -// * "level": the log level -// * "logger": the name of the associated logger -// * "msg": the log message -// * "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// * "ts": the timestamp for a log line +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // -// Break Glass +// # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } +// +// // Underlier exposes access to the underlying logging implementation. 
+// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } // // Logger grants access to the sink to enable type assertions like this: -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink()(impl.Underlier) { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. Source code attribution might not work correctly and @@ -184,16 +207,15 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -232,7 +254,13 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -241,8 +269,11 @@ func (l Logger) Enabled() bool { // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. 
-func (l Logger) Info(msg string, keysAndValues ...interface{}) { - if l.Enabled() { +func (l Logger) Info(msg string, keysAndValues ...any) { + if l.sink == nil { + return + } + if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -253,12 +284,17 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // Error logs an error, with the given message and key/value pairs as context. // It functions similarly to Info, but may have unique behavior, and should be // preferred for logging errors (see the package documentations for more -// information). +// information). The log message will always be emitted, regardless of +// verbosity level. // // The msg argument should be used to add context to any underlying error, // while the err argument should be used to attach the actual error that -// triggered this log line, if present. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. +func (l Logger) Error(err error, msg string, keysAndValues ...any) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -270,6 +306,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -277,9 +316,19 @@ func (l Logger) V(level int) Logger { return l } +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...interface{}) Logger { +func (l Logger) WithValues(keysAndValues ...any) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithValues(keysAndValues...)) return l } @@ -290,6 +339,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { // contain only letters, digits, and hyphens (see the package documentation for // more information). func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithName(name)) return l } @@ -310,6 +362,9 @@ func (l Logger) WithName(name string) Logger { // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } @@ -331,6 +386,9 @@ func (l Logger) WithCallDepth(depth int) Logger { // implementation does not support either of these, the original Logger will be // returned. 
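```go
// Illustrative sketch, not part of the upstream diff: how the nil-sink guards
// added in the hunks above behave for callers, assuming the go-logr/logr
// version vendored in this change. The report function and its key/value
// pairs are hypothetical.
package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr"
)

// report accepts any Logger value, including the zero value; every method is
// guarded against a nil sink, so nothing here can panic.
func report(log logr.Logger) {
	log = log.WithName("report").WithValues("component", "kube-mgmt")
	log.V(1).Info("starting", "attempt", 1)               // dropped when the sink is nil or V(1) is disabled
	log.Error(errors.New("boom"), "failed", "attempt", 1) // emitted regardless of verbosity when a sink is present
}

func main() {
	report(logr.Logger{})               // zero value: all calls are no-ops
	report(logr.Discard())              // identical to the zero value
	fmt.Println(logr.Logger{}.IsZero()) // true
}
```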
func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) @@ -343,43 +401,9 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { return helper, l } -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil } // RuntimeInfo holds information that the logr "core" library knows which @@ -413,22 +437,22 @@ type LogSink interface { // The level argument is provided for optional logging. This method will // only be called when Enabled(level) is true. See Logger.Info for more // details. - Info(level int, msg string, keysAndValues ...interface{}) + Info(level int, msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) // WithValues returns a new LogSink with additional key/value pairs. See // Logger.WithValues for more details. - WithValues(keysAndValues ...interface{}) LogSink + WithValues(keysAndValues ...any) LogSink // WithName returns a new LogSink with the specified name appended. See // Logger.WithName for more details. WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -453,7 +477,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. @@ -492,5 +516,5 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. 
- MarshalLog() interface{} + MarshalLog() any } diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go new file mode 100644 index 000000000..82d1ba494 --- /dev/null +++ b/vendor/github.com/go-logr/logr/sloghandler.go @@ -0,0 +1,192 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, l.groupPrefix, kvList) + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(ToSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because FromSlogHandler needs +// access to the original sink without this adjustment. A second copy would +// work, but then WithAttrs would have to be called for both of them. 
+func (l *slogHandler) sinkWithCallDepth() LogSink { + if sink, ok := l.sink.(CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + clone := *l + if l.slogSink != nil { + clone.slogSink = l.slogSink.WithAttrs(attrs) + clone.sink = clone.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, l.groupPrefix, kvList) + } + clone.sink = l.sink.WithValues(kvList...) + } + return &clone +} + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + if name == "" { + // slog says to inline empty groups + return l + } + clone := *l + if l.slogSink != nil { + clone.slogSink = l.slogSink.WithGroup(name) + clone.sink = clone.slogSink + } else { + clone.groupPrefix = addPrefix(clone.groupPrefix, name) + } + return &clone +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + prefix := groupPrefix + if attr.Key != "" { + prefix = addPrefix(groupPrefix, attr.Key) + } + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, prefix, grpKVs) + } + kvList = append(kvList, grpKVs...) + } else if attr.Key != "" { + kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) + } + + return kvList +} + +func addPrefix(prefix, name string) string { + if prefix == "" { + return name + } + if name == "" { + return prefix + } + return prefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original Logger had a V level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go new file mode 100644 index 000000000..28a83d024 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr.go @@ -0,0 +1,100 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logr + +import ( + "context" + "log/slog" +) + +// FromSlogHandler returns a Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func FromSlogHandler(handler slog.Handler) Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return Discard() + } + return New(handler.sink).V(int(handler.levelBias)) + } + return New(&slogSink{handler: handler}) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the Logger: +// +// logger := +// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func ToSlogHandler(logger Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in FromSlogHandler +// and ToSlogHandler. +type SlogSink interface { + LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go new file mode 100644 index 000000000..4060fcbc2 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogsink.go @@ -0,0 +1,120 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" + "runtime" + "time" +) + +var ( + _ LogSink = &slogSink{} + _ CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewFromLogHandler. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + _ = l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) LogSink { + if l.name != "" { + l.name += "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) 
+ attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig new file mode 100644 index 000000000..3152da69a --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore new file mode 100644 index 000000000..769c24400 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
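```go
// Illustrative sketch, not part of this diff: the slogr.go and slogsink.go
// files added above bridge logr and the standard library's log/slog. A round
// trip might look like this, assuming Go 1.21+ and the logr version vendored
// here; the handler configuration is hypothetical.
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// slog.Handler -> logr.Logger: the vendored slogSink wraps the handler.
	handler := slog.NewJSONHandler(os.Stdout, nil)
	logger := logr.FromSlogHandler(handler)
	logger.Info("info via logr", "key", "value")      // emitted as a slog info record
	logger.V(4).Info("debug detail", "key", "value")  // V(4) maps to slog.LevelDebug; the default handler level drops it

	// logr.Logger -> slog.Handler: ToSlogHandler unwraps back to the original handler here.
	slogger := slog.New(logr.ToSlogHandler(logger))
	slogger.Info("info via slog", "key", "value") // lands on the same JSON handler as an info record
}
```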
+ +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE similarity index 100% rename from vendor/google.golang.org/appengine/LICENSE rename to vendor/github.com/go-openapi/jsonpointer/LICENSE diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md new file mode 100644 index 000000000..813788aff --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -0,0 +1,15 @@ +# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) +An implementation of JSON Pointer - Go language + +## Status +Completed YES + +Tested YES + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go new file mode 100644 index 000000000..7df9853de --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -0,0 +1,390 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. +// +// created 25-02-2013 + +package jsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +const ( + emptyPointer = `` + pointerSeparator = `/` + + invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator +) + +var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() +var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() + +// JSONPointable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONPointable interface { + JSONLookup(string) (interface{}, error) +} + +// JSONSetable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONSetable interface { + JSONSet(string, interface{}) error +} + +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + + var p Pointer + err := p.parse(jsonPointerString) + return p, err + +} + +// Pointer the json pointer reprsentation +type Pointer struct { + referenceTokens []string +} + +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + + var err error + + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.New(invalidStart) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } + } + } + + return err +} + +// Get uses the pointer to retrieve a value from a JSON document +func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + return p.get(document, swag.DefaultJSONNameProvider) +} + +// Set uses the pointer to set a value from a JSON document +func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { + return document, p.set(document, value, swag.DefaultJSONNameProvider) +} + +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) +} + +// SetForToken gets a value for a json pointer token 1 level deep +func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { + return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) +} + +func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != 
nil { + return nil, kind, err + } + return r, kind, nil + } + + switch kind { + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if mv.IsValid() { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + switch rValue.Kind() { + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node, kind = r, knd + + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { + knd := reflect.ValueOf(node).Kind() + + if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + } + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + // Full document when empty + if len(p.referenceTokens) == 0 { + return nil + } + + lastI := len(p.referenceTokens) - 1 + for i, token := range 
p.referenceTokens { + isLastToken := i == lastI + decodedToken := Unescape(token) + + if isLastToken { + + return setSingleImpl(node, data, decodedToken, nameProvider) + } + + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = r + continue + } + + switch kind { + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = fld.Interface() + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if !mv.IsValid() { + return fmt.Errorf("object has no key %q", decodedToken) + } + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { + node = mv.Addr().Interface() + continue + } + node = mv.Interface() + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + node = elem.Addr().Interface() + continue + } + node = elem.Interface() + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + + } + + return nil +} + +// DecodedTokens returns the decoded tokens +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// Pointer to string representation function +func (p *Pointer) String() string { + + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... 
and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) + step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + return step2 +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) + step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + return step2 +} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore new file mode 100644 index 000000000..769c24400 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml new file mode 100644 index 000000000..013fc1943 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -0,0 +1,50 @@ +linters-settings: + govet: + check-shadowing: true + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + paralleltest: + ignore-missing: true +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - godox + - gocognit + - whitespace + - wsl + - funlen + - gochecknoglobals + - gochecknoinits + - scopelint + - wrapcheck + - exhaustivestruct + - exhaustive + - nlreturn + - testpackage + - gci + - gofumpt + - goerr113 + - gomnd + - tparallel + - nestif + - godot + - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
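```go
// Illustrative sketch, not part of this diff: the go-openapi/jsonpointer
// package vendored above resolves RFC 6901 style pointers against Go values.
// A minimal Get/Escape usage under the version added here; the sample
// document is hypothetical.
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]interface{}{
		"spec": map[string]interface{}{
			"replicas": 3,
			"a/b":      "escaped key", // "/" inside a key is written as "~1" in a pointer
		},
	}

	p, err := jsonpointer.New("/spec/replicas")
	if err != nil {
		panic(err)
	}
	v, kind, err := p.Get(doc)
	fmt.Println(v, kind, err) // 3 int <nil>

	fmt.Println(jsonpointer.Escape("a/b")) // a~1b
}
```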
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md new file mode 100644 index 000000000..b94753aa5 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -0,0 +1,15 @@ +# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) +An implementation of JSON Reference - Go language + +## Status +Feature complete. Stable API + +## Dependencies +https://github.com/go-openapi/jsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 000000000..f0610cf1e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,69 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. 
+func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 000000000..cfdef03e5 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,158 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" +) + +const ( + fragmentRune = `#` +) + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + + var r Ref + err := r.parse(jsonReferenceString) + return r, err + +} + +// MustCreateRef parses the ref string and panics when it's invalid. 
+// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + return r +} + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + internal.NormalizeURL(parsed) + + r.referenceURL = parsed + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, errors.New("child url is nil") + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig new file mode 100644 index 000000000..3152da69a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 000000000..49ad52766 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore new file mode 100644 index 000000000..d69b53acc --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +vendor +Godeps +.idea diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml new file mode 100644 index 000000000..bf503e400 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -0,0 +1,54 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 25 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 3 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoinits + - gochecknoglobals + - nlreturn + - testpackage + - wrapcheck + - gomnd + - exhaustive + - exhaustivestruct + - goerr113 + - wsl + - whitespace + - gofumpt + - godot + - nestif + - godox + - funlen + - gci + - gocognit + - paralleltest + - thelper + - ifshort + - gomoddirectives + - cyclop + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv + - golint + - exhaustruct + - nilnil + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, 
ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md new file mode 100644 index 000000000..217f6fa50 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/README.md @@ -0,0 +1,21 @@ +# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) + +Contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + +* convert between value and pointers for builtin types +* convert from string to builtin types (wraps strconv) +* fast json concatenation +* search in path +* load from file or http +* name mangling + + +This repo has only few dependencies outside of the standard library: + +* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go new file mode 100644 index 000000000..fc085aeb8 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -0,0 +1,208 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
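As a quick, hedged usage sketch of the helpers the README above advertises (values are hypothetical; the functions themselves are defined in convert.go and convert_types.go later in this diff):

    package main

    import (
        "fmt"

        "github.com/go-openapi/swag"
    )

    func main() {
        name := swag.String("kube-mgmt")         // value -> *string
        fmt.Println(swag.StringValue(name))      // "kube-mgmt"; a nil pointer would yield ""
        ok, _ := swag.ConvertBool("yes")         // true; unrecognized strings map to false, never an error
        fmt.Println(ok, swag.FormatFloat64(2.5)) // "true 2.5"
    }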
+ +package swag + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 + epsilon float64 = 1e-9 +) + +// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive +func IsFloat64AJSONInteger(f float64) bool { + if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { + return false + } + fa := math.Abs(f) + g := float64(uint64(f)) + ga := math.Abs(g) + + diff := math.Abs(f - g) + + // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases + switch { + case f == g: // best case + return true + case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case + return true + case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values + return diff < (epsilon * math.SmallestNonzeroFloat64) + } + // check the relative error + return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon +} + +var evaluatesAsTrue map[string]struct{} + +func init() { + evaluatesAsTrue = map[string]struct{}{ + "true": {}, + "1": {}, + "yes": {}, + "ok": {}, + "y": {}, + "on": {}, + "selected": {}, + "checked": {}, + "t": {}, + "enabled": {}, + } +} + +// ConvertBool turn a string into a boolean +func ConvertBool(str string) (bool, error) { + _, ok := evaluatesAsTrue[strings.ToLower(str)] + return ok, nil +} + +// ConvertFloat32 turn a string into a float32 +func ConvertFloat32(str string) (float32, error) { + f, err := strconv.ParseFloat(str, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// ConvertFloat64 turn a string into a float64 +func ConvertFloat64(str string) (float64, error) { + return strconv.ParseFloat(str, 64) +} + +// ConvertInt8 turn a string into an int8 +func ConvertInt8(str string) (int8, error) { + i, err := strconv.ParseInt(str, 10, 8) + if err != nil { + return 0, err + } + return int8(i), nil +} + +// ConvertInt16 turn a string into an int16 +func ConvertInt16(str string) (int16, error) { + i, err := strconv.ParseInt(str, 10, 16) + if err != nil { + return 0, err + } + return int16(i), nil +} + +// ConvertInt32 turn a string into an int32 +func ConvertInt32(str string) (int32, error) { + i, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// ConvertInt64 turn a string into an int64 +func ConvertInt64(str string) (int64, error) { + return strconv.ParseInt(str, 10, 64) +} + +// ConvertUint8 turn a string into an uint8 +func ConvertUint8(str string) (uint8, error) { + i, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return 0, err + } + return uint8(i), nil +} + +// ConvertUint16 turn a string into an uint16 +func ConvertUint16(str string) (uint16, error) { + i, err := strconv.ParseUint(str, 10, 16) + if err != nil { + return 0, err + } + return uint16(i), nil +} + +// ConvertUint32 turn a string into an uint32 +func ConvertUint32(str string) (uint32, error) { + i, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// ConvertUint64 turn a string into an uint64 +func ConvertUint64(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} + +// FormatBool turns a boolean into a string +func FormatBool(value bool) string { + return strconv.FormatBool(value) +} + +// FormatFloat32 turns a float32 into a string +func FormatFloat32(value float32) 
string { + return strconv.FormatFloat(float64(value), 'f', -1, 32) +} + +// FormatFloat64 turns a float64 into a string +func FormatFloat64(value float64) string { + return strconv.FormatFloat(value, 'f', -1, 64) +} + +// FormatInt8 turns an int8 into a string +func FormatInt8(value int8) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt16 turns an int16 into a string +func FormatInt16(value int16) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt32 turns an int32 into a string +func FormatInt32(value int32) string { + return strconv.Itoa(int(value)) +} + +// FormatInt64 turns an int64 into a string +func FormatInt64(value int64) string { + return strconv.FormatInt(value, 10) +} + +// FormatUint8 turns an uint8 into a string +func FormatUint8(value uint8) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint16 turns an uint16 into a string +func FormatUint16(value uint16) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint32 turns an uint32 into a string +func FormatUint32(value uint32) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint64 turns an uint64 into a string +func FormatUint64(value uint64) string { + return strconv.FormatUint(value, 10) +} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go new file mode 100644 index 000000000..c49cc473a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert_types.go @@ -0,0 +1,730 @@ +package swag + +import "time" + +// This file was taken from the aws go sdk + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to of the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to of the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + +// Uint returns a pointer to of the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. +func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values into a slice of +// uint pointers +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pointers into a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values into a string +// map of uint pointers +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pointers into a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to of the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to of the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to of the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go new file mode 100644 index 000000000..55094cb74 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -0,0 +1,31 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. + +You may also use it standalone for your projects. + + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling + +This repo has only few dependencies outside of the standard library: + + - YAML utilities depend on gopkg.in/yaml.v2 +*/ +package swag diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 000000000..16accc55f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. 
+type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go new file mode 100644 index 000000000..7e9902ca3 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/json.go @@ -0,0 +1,312 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "bytes" + "encoding/json" + "log" + "reflect" + "strings" + "sync" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + +// DefaultJSONNameProvider the default cache for types +var DefaultJSONNameProvider = NewNameProvider() + +const comma = byte(',') + +var closers map[byte]byte + +func init() { + closers = map[byte]byte{ + '{': '}', + '[': ']', + } +} + +type ejMarshaler interface { + MarshalEasyJSON(w *jwriter.Writer) +} + +type ejUnmarshaler interface { + UnmarshalEasyJSON(w *jlexer.Lexer) +} + +// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler +// so it takes the fastest option available. +func WriteJSON(data interface{}) ([]byte, error) { + if d, ok := data.(ejMarshaler); ok { + jw := new(jwriter.Writer) + d.MarshalEasyJSON(jw) + return jw.BuildBytes() + } + if d, ok := data.(json.Marshaler); ok { + return d.MarshalJSON() + } + return json.Marshal(data) +} + +// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler +// so it takes the fastest option available +func ReadJSON(data []byte, value interface{}) error { + trimmedData := bytes.Trim(data, "\x00") + if d, ok := value.(ejUnmarshaler); ok { + jl := &jlexer.Lexer{Data: trimmedData} + d.UnmarshalEasyJSON(jl) + return jl.Error() + } + if d, ok := value.(json.Unmarshaler); ok { + return d.UnmarshalJSON(trimmedData) + } + return json.Unmarshal(trimmedData, value) +} + +// DynamicJSONToStruct converts an untyped json structure into a struct +func DynamicJSONToStruct(data interface{}, target interface{}) error { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, err := WriteJSON(data) + if err != nil { + return err + } + return ReadJSON(b, target) +} + +// ConcatJSON concatenates multiple json objects efficiently +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last-- + if last < 0 { + // there was nothing but "null"s or nil... 
+ return nil + } + } + if last == 0 { + return blobs[0] + } + + var opening, closing byte + var idx, a int + buf := bytes.NewBuffer(nil) + + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } + if len(b) > 0 && opening == 0 { // is this an array or an object? + opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + if len(b) < 3 { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + if err := buf.WriteByte(comma); err != nil { + log.Println(err) + } + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + if _, err := buf.Write(b[idx : len(b)-1]); err != nil { + log.Println(err) + } + } else { // last one, strip only the leading bracket + if _, err := buf.Write(b[idx:]); err != nil { + log.Println(err) + } + } + a++ + } + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 { + if err := buf.WriteByte(opening); err != nil { + log.Println(err) + } + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } + } + return buf.Bytes() +} + +// ToDynamicJSON turns an object into a properly JSON typed structure +func ToDynamicJSON(data interface{}) interface{} { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } + var res interface{} + if err := json.Unmarshal(b, &res); err != nil { + log.Println(err) + } + return res +} + +// FromDynamicJSON turns an object into a properly JSON typed structure +func FromDynamicJSON(data, target interface{}) error { + b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } + return json.Unmarshal(b, target) +} + +// NameProvider represents an object capable of translating from go property names +// to json property names +// This type is thread-safe. 
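To illustrate the ConcatJSON behaviour documented above (inputs are hypothetical), nil and "null" blobs are skipped and the remaining object bodies are joined with commas inside a single pair of braces:

    merged := swag.ConcatJSON(
        []byte(`{"a":1}`),
        []byte(`null`),
        []byte(`{"b":2}`),
    )
    // string(merged) == `{"a":1,"b":2}`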
+type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject interface{}) []string { + n.lock.Lock() + defer n.lock.Unlock() + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + res := make([]string, 0, len(names.jsonNames)) + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.goNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.jsonNames[name] + return nme, ok +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go new file mode 100644 index 000000000..00038c377 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -0,0 +1,121 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +// LoadHTTPTimeout the default timeout for load requests +var LoadHTTPTimeout = 30 * time.Second + +// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth +var LoadHTTPBasicAuthUsername = "" + +// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth +var LoadHTTPBasicAuthPassword = "" + +// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests +var LoadHTTPCustomHeaders = map[string]string{} + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in +func LoadFromFileOrHTTP(path string) ([]byte, error) { + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +} + +// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in +// timeout arg allows for per request overriding of the request timeout +func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +} + +// LoadStrategy returns a loader function for a given path or uri +func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(path, "http") { + return remote + } + return func(pth string) ([]byte, error) { + upth, err := pathUnescape(pth) + if err != nil { + return nil, err + } + + if strings.HasPrefix(pth, `file://`) { + if runtime.GOOS == "windows" { + // support for canonical file URIs on windows. + // Zero tolerance here for dodgy URIs. + u, _ := url.Parse(upth) + if u.Host != "" { + // assume UNC name (volume share) + // file://host/share/folder\... ==> \\host\share\path\folder + // NOTE: UNC port not yet supported + upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) + } else { + // file:///c:/folder/... 
==> just remove the leading slash + upth = strings.TrimPrefix(upth, `file:///`) + } + } else { + upth = strings.TrimPrefix(upth, `file://`) + } + } + + return local(filepath.FromSlash(upth)) + } +} + +func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { + return func(path string) ([]byte, error) { + client := &http.Client{Timeout: timeout} + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx + if err != nil { + return nil, err + } + + if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" { + req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword) + } + + for key, val := range LoadHTTPCustomHeaders { + req.Header.Set(key, val) + } + + resp, err := client.Do(req) + defer func() { + if resp != nil { + if e := resp.Body.Close(); e != nil { + log.Println(e) + } + } + }() + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + } + + return io.ReadAll(resp.Body) + } +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go new file mode 100644 index 000000000..aa7f6a9bb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import "unicode" + +type ( + nameLexem interface { + GetUnsafeGoName() string + GetOriginal() string + IsInitialism() bool + } + + initialismNameLexem struct { + original string + matchedInitialism string + } + + casualNameLexem struct { + original string + } +) + +func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { + return &initialismNameLexem{ + original: original, + matchedInitialism: matchedInitialism, + } +} + +func newCasualNameLexem(original string) *casualNameLexem { + return &casualNameLexem{ + original: original, + } +} + +func (l *initialismNameLexem) GetUnsafeGoName() string { + return l.matchedInitialism +} + +func (l *casualNameLexem) GetUnsafeGoName() string { + var first rune + var rest string + for i, orig := range l.original { + if i == 0 { + first = orig + continue + } + if i > 0 { + rest = l.original[i:] + break + } + } + if len(l.original) > 1 { + return string(unicode.ToUpper(first)) + lower(rest) + } + + return l.original +} + +func (l *initialismNameLexem) GetOriginal() string { + return l.original +} + +func (l *casualNameLexem) GetOriginal() string { + return l.original +} + +func (l *initialismNameLexem) IsInitialism() bool { + return true +} + +func (l *casualNameLexem) IsInitialism() bool { + return false +} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go new file mode 100644 index 000000000..821235f84 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/net.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "net" + "strconv" +) + +// SplitHostPort splits a network address into a host and a port. +// The port is -1 when there is no port to be found +func SplitHostPort(addr string) (host string, port int, err error) { + h, p, err := net.SplitHostPort(addr) + if err != nil { + return "", -1, err + } + if p == "" { + return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} + } + + pi, err := strconv.Atoi(p) + if err != nil { + return "", -1, err + } + return h, pi, nil +} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go new file mode 100644 index 000000000..941bd0176 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/path.go @@ -0,0 +1,59 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import ( + "os" + "path/filepath" + "runtime" + "strings" +) + +const ( + // GOPATHKey represents the env key for gopath + GOPATHKey = "GOPATH" +) + +// FindInSearchPath finds a package in a provided lists of paths +func FindInSearchPath(searchPath, pkg string) string { + pathsList := filepath.SplitList(searchPath) + for _, path := range pathsList { + if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { + if _, err := os.Stat(evaluatedPath); err == nil { + return evaluatedPath + } + } + } + return "" +} + +// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +func FindInGoSearchPath(pkg string) string { + return FindInSearchPath(FullGoSearchPath(), pkg) +} + +// FullGoSearchPath gets the search paths for finding packages +func FullGoSearchPath() string { + allPaths := os.Getenv(GOPATHKey) + if allPaths == "" { + allPaths = filepath.Join(os.Getenv("HOME"), "go") + } + if allPaths != "" { + allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") + } else { + allPaths = runtime.GOROOT() + } + return allPaths +} diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go new file mode 100644 index 000000000..f5228b82c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.8 +// +build go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.PathUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go new file mode 100644 index 000000000..7c7da9c08 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.9 +// +build go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. 
+type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, value interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go new file mode 100644 index 000000000..2757d9b95 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.8 +// +build !go1.8 + +package swag + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go new file mode 100644 index 000000000..0565db377 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -0,0 +1,70 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.9 +// +build !go1.9 + +package swag + +import ( + "sort" + "sync" +) + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Before go1.9, this may be implemented with a mutex on the map. 
+type indexOfInitialisms struct { + getMutex *sync.Mutex + index map[string]bool +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + getMutex: new(sync.Mutex), + index: make(map[string]bool, 50), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k, v := range initial { + m.index[k] = v + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + m.getMutex.Lock() + defer m.getMutex.Unlock() + _, ok := m.index[key] + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.getMutex.Lock() + defer m.getMutex.Unlock() + m.index[key] = true + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.getMutex.Lock() + defer m.getMutex.Unlock() + for k := range m.index { + result = append(result, k) + } + sort.Sort(sort.Reverse(byInitialism(result))) + return +} diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go new file mode 100644 index 000000000..a1825fb7d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/split.go @@ -0,0 +1,262 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import ( + "unicode" +) + +var nameReplaceTable = map[rune]string{ + '@': "At ", + '&': "And ", + '|': "Pipe ", + '$': "Dollar ", + '!': "Bang ", + '-': "", + '_': "", +} + +type ( + splitter struct { + postSplitInitialismCheck bool + initialisms []string + } + + splitterOption func(*splitter) *splitter +) + +// split calls the splitter; splitter provides more control and post options +func split(str string) []string { + lexems := newSplitter().split(str) + result := make([]string, 0, len(lexems)) + + for _, lexem := range lexems { + result = append(result, lexem.GetOriginal()) + } + + return result + +} + +func (s *splitter) split(str string) []nameLexem { + return s.toNameLexems(str) +} + +func newSplitter(options ...splitterOption) *splitter { + splitter := &splitter{ + postSplitInitialismCheck: false, + initialisms: initialisms, + } + + for _, option := range options { + splitter = option(splitter) + } + + return splitter +} + +// withPostSplitInitialismCheck allows to catch initialisms after main split process +func withPostSplitInitialismCheck(s *splitter) *splitter { + s.postSplitInitialismCheck = true + return s +} + +type ( + initialismMatch struct { + start, end int + body []rune + complete bool + } + initialismMatches []*initialismMatch +) + +func (s *splitter) toNameLexems(name string) []nameLexem { + nameRunes := []rune(name) + matches := s.gatherInitialismMatches(nameRunes) + return s.mapMatchesToNameLexems(nameRunes, matches) +} + +func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { + matches := make(initialismMatches, 0) + + for currentRunePosition, currentRune := range nameRunes { + newMatches := make(initialismMatches, 0, len(matches)) + + // check current initialism matches + for _, match := range matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + newMatches = append(newMatches, match) + continue + } + + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if !s.initialismRuneEqual(currentMatchRune, currentRune) { + continue + } + + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } + } + + match.complete = true + match.end = currentRunePosition + } + + newMatches = append(newMatches, match) + } + + // check for new initialism matches + for _, initialism := range s.initialisms { + initialismRunes := []rune(initialism) + if s.initialismRuneEqual(initialismRunes[0], currentRune) { + newMatches = append(newMatches, &initialismMatch{ + start: currentRunePosition, + body: initialismRunes, + complete: false, + }) + } + } + + matches = newMatches + } + + return matches +} + +func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { + nameLexems := make([]nameLexem, 0) + + var lastAcceptedMatch *initialismMatch + for _, match := range matches { + if !match.complete { + continue + } + + if firstMatch := lastAcceptedMatch == nil; firstMatch { + nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
+ nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + + continue + } + + if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch { + continue + } + + middle := nameRunes[lastAcceptedMatch.end+1 : match.start] + nameLexems = append(nameLexems, s.breakCasualString(middle)...) + nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + + lastAcceptedMatch = match + } + + // we have not found any accepted matches + if lastAcceptedMatch == nil { + return s.breakCasualString(nameRunes) + } + + if lastAcceptedMatch.end+1 != len(nameRunes) { + rest := nameRunes[lastAcceptedMatch.end+1:] + nameLexems = append(nameLexems, s.breakCasualString(rest)...) + } + + return nameLexems +} + +func (s *splitter) initialismRuneEqual(a, b rune) bool { + return a == b +} + +func (s *splitter) breakInitialism(original string) nameLexem { + return newInitialismNameLexem(original, original) +} + +func (s *splitter) breakCasualString(str []rune) []nameLexem { + segments := make([]nameLexem, 0) + currentSegment := "" + + addCasualNameLexem := func(original string) { + segments = append(segments, newCasualNameLexem(original)) + } + + addInitialismNameLexem := func(original, match string) { + segments = append(segments, newInitialismNameLexem(original, match)) + } + + addNameLexem := func(original string) { + if s.postSplitInitialismCheck { + for _, initialism := range s.initialisms { + if upper(initialism) == upper(original) { + addInitialismNameLexem(original, initialism) + return + } + } + } + + addCasualNameLexem(original) + } + + for _, rn := range string(str) { + if replace, found := nameReplaceTable[rn]; found { + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" + } + + if replace != "" { + addNameLexem(replace) + } + + continue + } + + if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { + if currentSegment != "" { + addNameLexem(currentSegment) + currentSegment = "" + } + + continue + } + + if unicode.IsUpper(rn) { + if currentSegment != "" { + addNameLexem(currentSegment) + } + currentSegment = "" + } + + currentSegment += string(rn) + } + + if currentSegment != "" { + addNameLexem(currentSegment) + } + + return segments +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go new file mode 100644 index 000000000..d971fbe34 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/util.go @@ -0,0 +1,394 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "reflect" + "strings" + "unicode" +) + +// commonInitialisms are common acronyms that are kept as whole uppercased words. +var commonInitialisms *indexOfInitialisms + +// initialisms is a slice of sorted initialisms +var initialisms []string + +var isInitialism func(string) bool + +// GoNamePrefixFunc sets an optional rule to prefix go names +// which do not start with a letter. +// +// e.g. 
to help convert "123" into "{prefix}123" +// +// The default is to prefix with "X" +var GoNamePrefixFunc func(string) string + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + var configuredInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +const ( + // collectionFormatComma = "csv" + collectionFormatSpace = "ssv" + collectionFormatTab = "tsv" + collectionFormatPipe = "pipes" + collectionFormatMulti = "multi" +) + +// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return data + default: + sep = "," + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case collectionFormatSpace: + sep = " " + case collectionFormatTab: + sep = "\t" + case collectionFormatPipe: + sep = "|" + case collectionFormatMulti: + return nil + default: + sep = "," + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} + +// Removes leading whitespaces +func trim(str string) string { + return strings.Trim(str, " ") +} + +// Shortcut to strings.ToUpper() +func upper(str string) string { + return strings.ToUpper(trim(str)) +} + +// Shortcut to strings.ToLower() +func lower(str string) string { + return strings.ToLower(trim(str)) +} + +// Camelize an uppercased word +func Camelize(word string) (camelized string) { + for pos, ru := range []rune(word) { + if pos > 0 { + camelized += string(unicode.ToLower(ru)) + } else { + camelized += string(unicode.ToUpper(ru)) + } + } + return +} + +// ToFileName lowercases 
and underscores a go type name +func ToFileName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + + return strings.Join(out, "_") +} + +// ToCommandName lowercases and underscores a go type name +func ToCommandName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + out = append(out, lower(w)) + } + return strings.Join(out, "-") +} + +// ToHumanNameLower represents a code name as a human series of words +func ToHumanNameLower(name string) string { + in := newSplitter(withPostSplitInitialismCheck).split(name) + out := make([]string, 0, len(in)) + + for _, w := range in { + if !w.IsInitialism() { + out = append(out, lower(w.GetOriginal())) + } else { + out = append(out, w.GetOriginal()) + } + } + + return strings.Join(out, " ") +} + +// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized +func ToHumanNameTitle(name string) string { + in := newSplitter(withPostSplitInitialismCheck).split(name) + + out := make([]string, 0, len(in)) + for _, w := range in { + original := w.GetOriginal() + if !w.IsInitialism() { + out = append(out, Camelize(original)) + } else { + out = append(out, original) + } + } + return strings.Join(out, " ") +} + +// ToJSONName camelcases a name which can be underscored or pascal cased +func ToJSONName(name string) string { + in := split(name) + out := make([]string, 0, len(in)) + + for i, w := range in { + if i == 0 { + out = append(out, lower(w)) + continue + } + out = append(out, Camelize(w)) + } + return strings.Join(out, "") +} + +// ToVarName camelcases a name which can be underscored or pascal cased +func ToVarName(name string) string { + res := ToGoName(name) + if isInitialism(res) { + return lower(res) + } + if len(res) <= 1 { + return lower(res) + } + return lower(res[:1]) + res[1:] +} + +// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes +func ToGoName(name string) string { + lexems := newSplitter(withPostSplitInitialismCheck).split(name) + + result := "" + for _, lexem := range lexems { + goName := lexem.GetUnsafeGoName() + + // to support old behavior + if lexem.IsInitialism() { + goName = upper(goName) + } + result += goName + } + + if len(result) > 0 { + // Only prefix with X when the first character isn't an ascii letter + first := []rune(result)[0] + if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { + if GoNamePrefixFunc == nil { + return "X" + result + } + result = GoNamePrefixFunc(name) + result + } + first = []rune(result)[0] + if unicode.IsLetter(first) && !unicode.IsUpper(first) { + result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) + } + } + + return result +} + +// ContainsStrings searches a slice of strings for a case-sensitive match +func ContainsStrings(coll []string, item string) bool { + for _, a := range coll { + if a == item { + return true + } + } + return false +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match +func ContainsStringsCI(coll []string, item string) bool { + for _, a := range coll { + if strings.EqualFold(a, item) { + return true + } + } + return false +} + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. 
+func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + + // continue with slightly more complex reflection + switch v.Kind() { + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + default: + return false + } +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() +} + +// CommandLineOptionsGroup represents a group of user-defined command line options +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options interface{} +} diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go new file mode 100644 index 000000000..f09ee609f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -0,0 +1,450 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strconv" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" + yaml "gopkg.in/yaml.v3" +) + +// YAMLMatcher matches yaml +func YAMLMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".yaml" || ext == ".yml" +} + +// YAMLToJSON converts YAML unmarshaled data into json compatible data +func YAMLToJSON(data interface{}) (json.RawMessage, error) { + jm, err := transformData(data) + if err != nil { + return nil, err + } + b, err := WriteJSON(jm) + return json.RawMessage(b), err +} + +// BytesToYAMLDoc converts a byte slice into a YAML document +func BytesToYAMLDoc(data []byte) (interface{}, error) { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { + return nil, err + } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported") + } + return &document, nil +} + +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node *yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. 
Expecting integer content: %w", node.Value, err) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + } + return f, nil + case yamlTimestamp: + return node.Value, nil + case yamlNull: + return nil, nil + default: + return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) + } +} + +// JSONMapSlice represent a JSON object, with the order of keys maintained +type JSONMapSlice []JSONMapItem + +// MarshalJSON renders a JSONMapSlice as JSON +func (s JSONMapSlice) MarshalJSON() ([]byte, error) { + w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} + s.MarshalEasyJSON(w) + return w.BuildBytes() +} + +// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON +func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { + w.RawByte('{') + + ln := len(s) + last := ln - 1 + for i := 0; i < ln; i++ { + s[i].MarshalEasyJSON(w) + if i != last { // last item + w.RawByte(',') + } + } + + w.RawByte('}') +} + +// UnmarshalJSON makes a JSONMapSlice from JSON +func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON +func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { + if in.IsNull() { + in.Skip() + return + } + + var result JSONMapSlice + in.Delim('{') + for !in.IsDelim('}') { + var mi JSONMapItem + mi.UnmarshalEasyJSON(in) + result = append(result, mi) + } + *s = result +} + +func (s JSONMapSlice) MarshalYAML() (interface{}, error) { + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) 
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + for k, v := range val { + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + } + return nil, nil +} + +// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice +type JSONMapItem struct { + Key string + Value interface{} +} + +// MarshalJSON renders a JSONMapItem as JSON +func (s JSONMapItem) MarshalJSON() ([]byte, error) { + w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} + s.MarshalEasyJSON(w) + return w.BuildBytes() +} + +// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON +func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { + w.String(s.Key) + w.RawByte(':') + w.Raw(WriteJSON(s.Value)) +} + +// UnmarshalJSON makes a JSONMapItem from JSON +func (s *JSONMapItem) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON +func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { + key := in.UnsafeString() + in.WantColon() + value := in.Interface() + in.WantComma() + s.Key = key + s.Value = value +} + +func transformData(input interface{}) (out interface{}, err error) { + format := func(t interface{}) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 10), nil + case uint8: + return strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", 
fmt.Errorf("unexpected map key type, got: %T", k) + } + } + + switch in := input.(type) { + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) + case map[interface{}]interface{}: + o := make(JSONMapSlice, 0, len(in)) + for ke, va := range in { + var nmi JSONMapItem + if nmi.Key, err = format(ke); err != nil { + return nil, err + } + + v, ert := transformData(va) + if ert != nil { + return nil, ert + } + nmi.Value = v + o = append(o, nmi) + } + return o, nil + case []interface{}: + len1 := len(in) + o := make([]interface{}, len1) + for i := 0; i < len1; i++ { + o[i], err = transformData(in[i]) + if err != nil { + return nil, err + } + } + return o, nil + } + return input, nil +} + +// YAMLDoc loads a yaml document from either http or a file and converts it to json +func YAMLDoc(path string) (json.RawMessage, error) { + yamlDoc, err := YAMLData(path) + if err != nil { + return nil, err + } + + data, err := YAMLToJSON(yamlDoc) + if err != nil { + return nil, err + } + + return data, nil +} + +// YAMLData loads a yaml document from either http or a file +func YAMLData(path string) (interface{}, error) { + data, err := LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + + return BytesToYAMLDoc(data) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f5736..fdff3fdb4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. 
diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/google/gnostic-models/LICENSE similarity index 100% rename from vendor/github.com/googleapis/gnostic/LICENSE rename to vendor/github.com/google/gnostic-models/LICENSE diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/google/gnostic-models/compiler/README.md similarity index 100% rename from vendor/github.com/googleapis/gnostic/compiler/README.md rename to vendor/github.com/google/gnostic-models/compiler/README.md diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/compiler/context.go rename to vendor/github.com/google/gnostic-models/compiler/context.go diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/google/gnostic-models/compiler/error.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/compiler/error.go rename to vendor/github.com/google/gnostic-models/compiler/error.go diff --git a/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go similarity index 97% rename from vendor/github.com/googleapis/gnostic/compiler/extensions.go rename to vendor/github.com/google/gnostic-models/compiler/extensions.go index 20848a0a1..250c81e8c 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -22,8 +22,9 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/any" - extensions "github.com/googleapis/gnostic/extensions" yaml "gopkg.in/yaml.v3" + + extensions "github.com/google/gnostic-models/extensions" ) // ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. 
diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go similarity index 99% rename from vendor/github.com/googleapis/gnostic/compiler/helpers.go rename to vendor/github.com/google/gnostic-models/compiler/helpers.go index 48f02f395..975d65e8f 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go @@ -20,8 +20,9 @@ import ( "sort" "strconv" - "github.com/googleapis/gnostic/jsonschema" "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/jsonschema" ) // compiler helper functions, usually called from generated code diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/google/gnostic-models/compiler/main.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/compiler/main.go rename to vendor/github.com/google/gnostic-models/compiler/main.go diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/compiler/reader.go rename to vendor/github.com/google/gnostic-models/compiler/reader.go diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/google/gnostic-models/extensions/README.md similarity index 100% rename from vendor/github.com/googleapis/gnostic/extensions/README.md rename to vendor/github.com/google/gnostic-models/extensions/README.md diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go similarity index 99% rename from vendor/github.com/googleapis/gnostic/extensions/extension.pb.go rename to vendor/github.com/google/gnostic-models/extensions/extension.pb.go index 5aab58ebf..a71df8abe 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: extensions/extension.proto package gnostic_extension_v1 diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/google/gnostic-models/extensions/extension.proto similarity index 100% rename from vendor/github.com/googleapis/gnostic/extensions/extension.proto rename to vendor/github.com/google/gnostic-models/extensions/extension.proto diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/extensions/extensions.go rename to vendor/github.com/google/gnostic-models/extensions/extensions.go diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/vendor/github.com/google/gnostic-models/jsonschema/README.md similarity index 100% rename from vendor/github.com/googleapis/gnostic/jsonschema/README.md rename to vendor/github.com/google/gnostic-models/jsonschema/README.md diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/vendor/github.com/google/gnostic-models/jsonschema/base.go similarity index 90% rename from vendor/github.com/googleapis/gnostic/jsonschema/base.go rename to vendor/github.com/google/gnostic-models/jsonschema/base.go index 0af8b148b..5fcc4885a 100644 --- a/vendor/github.com/googleapis/gnostic/jsonschema/base.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/base.go @@ -1,3 +1,16 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // THIS FILE IS AUTOMATICALLY GENERATED. 
@@ -81,4 +94,4 @@ YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 -c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/vendor/github.com/google/gnostic-models/jsonschema/display.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/jsonschema/display.go rename to vendor/github.com/google/gnostic-models/jsonschema/display.go diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/jsonschema/models.go rename to vendor/github.com/google/gnostic-models/jsonschema/models.go diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/vendor/github.com/google/gnostic-models/jsonschema/operations.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/jsonschema/operations.go rename to vendor/github.com/google/gnostic-models/jsonschema/operations.go diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/jsonschema/reader.go rename to vendor/github.com/google/gnostic-models/jsonschema/reader.go diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/vendor/github.com/google/gnostic-models/jsonschema/schema.json similarity index 100% rename from vendor/github.com/googleapis/gnostic/jsonschema/schema.json rename to vendor/github.com/google/gnostic-models/jsonschema/schema.json diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go similarity index 100% rename from vendor/github.com/googleapis/gnostic/jsonschema/writer.go rename to vendor/github.com/google/gnostic-models/jsonschema/writer.go diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go similarity index 99% rename from vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go index 727d7f4ad..d71fe6d54 100644 --- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go @@ -18,10 +18,12 @@ package openapi_v2 import ( "fmt" - "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v3" "regexp" "strings" + + "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). 
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go similarity index 99% rename from vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go index 8a5f302f3..65c4c913c 100644 --- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.5 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: openapiv2/OpenAPIv2.proto package openapi_v2 diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto similarity index 100% rename from vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/README.md b/vendor/github.com/google/gnostic-models/openapiv2/README.md similarity index 100% rename from vendor/github.com/googleapis/gnostic/openapiv2/README.md rename to vendor/github.com/google/gnostic-models/openapiv2/README.md diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go similarity index 96% rename from vendor/github.com/googleapis/gnostic/openapiv2/document.go rename to vendor/github.com/google/gnostic-models/openapiv2/document.go index 56e5966b4..e96ac0d6d 100644 --- a/vendor/github.com/googleapis/gnostic/openapiv2/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go @@ -15,8 +15,9 @@ package openapi_v2 import ( - "github.com/googleapis/gnostic/compiler" "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/compiler" ) // ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json b/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json similarity index 100% rename from vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json rename to vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go new file mode 100644 index 000000000..4b1131ce1 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -0,0 +1,8633 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v3 + +import ( + "fmt" + "regexp" + "strings" + + "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/compiler" +) + +// Version returns the package name (and OpenAPI version). 
+func Version() string { + return "openapi_v3" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // SchemaOrReference schema_or_reference = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchemaOrReference(m, compiler.NewContext("schemaOrReference", m, context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_SchemaOrReference{SchemaOrReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := compiler.BoolForScalarNode(in) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + matched = true + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes := compiler.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAnyOrExpression creates an object of type AnyOrExpression if possible, returning an error if not. +func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpression, error) { + errors := make([]error, 0) + x := &AnyOrExpression{} + matched := false + // Any any = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewAny(m, compiler.NewContext("any", m, context)) + if matchingError == nil { + x.Oneof = &AnyOrExpression_Any{Any: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Expression expression = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewExpression(m, compiler.NewContext("expression", m, context)) + if matchingError == nil { + x.Oneof = &AnyOrExpression_Expression{Expression: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid AnyOrExpression") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewCallback creates an object of type Callback if possible, returning an error if not. 
+func NewCallback(in *yaml.Node, context *compiler.Context) (*Callback, error) { + errors := make([]error, 0) + x := &Callback{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedPathItem path = 1; + // MAP: PathItem ^ + x.Path = make([]*NamedPathItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if true { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + // repeated NamedAny specification_extension = 2; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewCallbackOrReference creates an object of type CallbackOrReference if possible, returning an error if not. 
+func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*CallbackOrReference, error) { + errors := make([]error, 0) + x := &CallbackOrReference{} + matched := false + // Callback callback = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewCallback(m, compiler.NewContext("callback", m, context)) + if matchingError == nil { + x.Oneof = &CallbackOrReference_Callback{Callback: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &CallbackOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid CallbackOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewCallbacksOrReferences creates an object of type CallbacksOrReferences if possible, returning an error if not. +func NewCallbacksOrReferences(in *yaml.Node, context *compiler.Context) (*CallbacksOrReferences, error) { + errors := make([]error, 0) + x := &CallbacksOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedCallbackOrReference additional_properties = 1; + // MAP: CallbackOrReference + x.AdditionalProperties = make([]*NamedCallbackOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedCallbackOrReference{} + pair.Name = k + var err error + pair.Value, err = NewCallbackOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewComponents creates an object of type Components if possible, returning an error if not. 
+func NewComponents(in *yaml.Node, context *compiler.Context) (*Components, error) { + errors := make([]error, 0) + x := &Components{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"callbacks", "examples", "headers", "links", "parameters", "requestBodies", "responses", "schemas", "securitySchemes"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // SchemasOrReferences schemas = 1; + v1 := compiler.MapValueForKey(m, "schemas") + if v1 != nil { + var err error + x.Schemas, err = NewSchemasOrReferences(v1, compiler.NewContext("schemas", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponsesOrReferences responses = 2; + v2 := compiler.MapValueForKey(m, "responses") + if v2 != nil { + var err error + x.Responses, err = NewResponsesOrReferences(v2, compiler.NewContext("responses", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // ParametersOrReferences parameters = 3; + v3 := compiler.MapValueForKey(m, "parameters") + if v3 != nil { + var err error + x.Parameters, err = NewParametersOrReferences(v3, compiler.NewContext("parameters", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // ExamplesOrReferences examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v4, compiler.NewContext("examples", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // RequestBodiesOrReferences request_bodies = 5; + v5 := compiler.MapValueForKey(m, "requestBodies") + if v5 != nil { + var err error + x.RequestBodies, err = NewRequestBodiesOrReferences(v5, compiler.NewContext("requestBodies", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // HeadersOrReferences headers = 6; + v6 := compiler.MapValueForKey(m, "headers") + if v6 != nil { + var err error + x.Headers, err = NewHeadersOrReferences(v6, compiler.NewContext("headers", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // SecuritySchemesOrReferences security_schemes = 7; + v7 := compiler.MapValueForKey(m, "securitySchemes") + if v7 != nil { + var err error + x.SecuritySchemes, err = NewSecuritySchemesOrReferences(v7, compiler.NewContext("securitySchemes", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // LinksOrReferences links = 8; + v8 := compiler.MapValueForKey(m, "links") + if v8 != nil { + var err error + x.Links, err = NewLinksOrReferences(v8, compiler.NewContext("links", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // CallbacksOrReferences callbacks = 9; + v9 := compiler.MapValueForKey(m, "callbacks") + if v9 != nil { + var err error + x.Callbacks, err = NewCallbacksOrReferences(v9, compiler.NewContext("callbacks", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 10; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := 
m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefaultType creates an object of type DefaultType if possible, returning an error if not. 
+func NewDefaultType(in *yaml.Node, context *compiler.Context) (*DefaultType, error) { + errors := make([]error, 0) + x := &DefaultType{} + matched := false + switch in.Tag { + case "!!bool": + var v bool + v, matched = compiler.BoolForScalarNode(in) + x.Oneof = &DefaultType_Boolean{Boolean: v} + case "!!str": + var v string + v, matched = compiler.StringForScalarNode(in) + x.Oneof = &DefaultType_String_{String_: v} + case "!!float": + var v float64 + v, matched = compiler.FloatForScalarNode(in) + x.Oneof = &DefaultType_Number{Number: v} + case "!!int": + var v int64 + v, matched = compiler.IntForScalarNode(in) + x.Oneof = &DefaultType_Number{Number: float64(v)} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDiscriminator creates an object of type Discriminator if possible, returning an error if not. +func NewDiscriminator(in *yaml.Node, context *compiler.Context) (*Discriminator, error) { + errors := make([]error, 0) + x := &Discriminator{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"propertyName"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"mapping", "propertyName"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string property_name = 1; + v1 := compiler.MapValueForKey(m, "propertyName") + if v1 != nil { + x.PropertyName, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for propertyName: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Strings mapping = 2; + v2 := compiler.MapValueForKey(m, "mapping") + if v2 != nil { + var err error + x.Mapping, err = NewStrings(v2, compiler.NewContext("mapping", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "openapi", "paths"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"components", "externalDocs", "info", "openapi", "paths", "security", "servers", "tags"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string openapi = 1; + v1 := compiler.MapValueForKey(m, "openapi") + if v1 != nil { + x.Openapi, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for openapi: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Server servers = 3; + v3 := compiler.MapValueForKey(m, "servers") + if v3 != nil { + // repeated Server + x.Servers = make([]*Server, 0) + a, ok := compiler.SequenceNodeForNode(v3) + if ok { + for _, item := range a.Content { + y, err := NewServer(item, compiler.NewContext("servers", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Servers = append(x.Servers, y) + } + } + } + // Paths paths = 4; + v4 := compiler.MapValueForKey(m, "paths") + if v4 != nil { + var err error + x.Paths, err = NewPaths(v4, compiler.NewContext("paths", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // Components components = 5; + v5 := compiler.MapValueForKey(m, "components") + if v5 != nil { + var err error + x.Components, err = NewComponents(v5, compiler.NewContext("components", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 6; + v6 := compiler.MapValueForKey(m, "security") + if v6 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v6) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated Tag tags = 7; + v7 := compiler.MapValueForKey(m, "tags") + if v7 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := compiler.SequenceNodeForNode(v7) + if ok { + for _, item := range a.Content { + y, err := NewTag(item, compiler.NewContext("tags", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err 
= NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 9; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewEncoding creates an object of type Encoding if possible, returning an error if not. +func NewEncoding(in *yaml.Node, context *compiler.Context) (*Encoding, error) { + errors := make([]error, 0) + x := &Encoding{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowReserved", "contentType", "explode", "headers", "style"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string content_type = 1; + v1 := compiler.MapValueForKey(m, "contentType") + if v1 != nil { + x.ContentType, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for contentType: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // HeadersOrReferences headers = 2; + v2 := compiler.MapValueForKey(m, "headers") + if v2 != nil { + var err error + x.Headers, err = NewHeadersOrReferences(v2, compiler.NewContext("headers", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // string style = 3; + v3 := compiler.MapValueForKey(m, "style") + if v3 != nil { + x.Style, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool explode = 4; + v4 := compiler.MapValueForKey(m, "explode") + if v4 != nil { + x.Explode, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_reserved = 5; + v5 := compiler.MapValueForKey(m, "allowReserved") + if v5 != nil { + x.AllowReserved, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 6; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := 
compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewEncodings creates an object of type Encodings if possible, returning an error if not. +func NewEncodings(in *yaml.Node, context *compiler.Context) (*Encodings, error) { + errors := make([]error, 0) + x := &Encodings{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedEncoding additional_properties = 1; + // MAP: Encoding + x.AdditionalProperties = make([]*NamedEncoding, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedEncoding{} + pair.Name = k + var err error + pair.Value, err = NewEncoding(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExample creates an object of type Example if possible, returning an error if not. +func NewExample(in *yaml.Node, context *compiler.Context) (*Example, error) { + errors := make([]error, 0) + x := &Example{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"description", "externalValue", "summary", "value"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string summary = 1; + v1 := compiler.MapValueForKey(m, "summary") + if v1 != nil { + x.Summary, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 3; + v3 := compiler.MapValueForKey(m, "value") + if v3 != nil { + var err error + x.Value, err = NewAny(v3, compiler.NewContext("value", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string external_value = 4; + v4 := compiler.MapValueForKey(m, "externalValue") + if v4 != nil { + x.ExternalValue, ok = compiler.StringForScalarNode(v4) + if !ok { + message := 
fmt.Sprintf("has unexpected value for externalValue: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExampleOrReference creates an object of type ExampleOrReference if possible, returning an error if not. +func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOrReference, error) { + errors := make([]error, 0) + x := &ExampleOrReference{} + matched := false + // Example example = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewExample(m, compiler.NewContext("example", m, context)) + if matchingError == nil { + x.Oneof = &ExampleOrReference_Example{Example: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &ExampleOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ExampleOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamplesOrReferences creates an object of type ExamplesOrReferences if possible, returning an error if not. 
+func NewExamplesOrReferences(in *yaml.Node, context *compiler.Context) (*ExamplesOrReferences, error) { + errors := make([]error, 0) + x := &ExamplesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedExampleOrReference additional_properties = 1; + // MAP: ExampleOrReference + x.AdditionalProperties = make([]*NamedExampleOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedExampleOrReference{} + pair.Name = k + var err error + pair.Value, err = NewExampleOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExpression creates an object of type Expression if possible, returning an error if not. +func NewExpression(in *yaml.Node, context *compiler.Context) (*Expression, error) { + errors := make([]error, 0) + x := &Expression{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. 
+func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. 
+func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "allowReserved", "content", "deprecated", "description", "example", "examples", "explode", "required", "schema", "style"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 2; + v2 := compiler.MapValueForKey(m, "required") + if v2 != nil { + x.Required, ok = compiler.BoolForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 3; + v3 := compiler.MapValueForKey(m, "deprecated") + if v3 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 4; + v4 := compiler.MapValueForKey(m, "allowEmptyValue") + if v4 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string style = 5; + v5 := compiler.MapValueForKey(m, "style") + if v5 != nil { + x.Style, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool explode = 6; + v6 := compiler.MapValueForKey(m, "explode") + if v6 != nil { + x.Explode, ok = compiler.BoolForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_reserved = 7; + v7 := compiler.MapValueForKey(m, "allowReserved") + if v7 != nil { + x.AllowReserved, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaOrReference schema = 8; + v8 := compiler.MapValueForKey(m, "schema") + if v8 != nil { + var err error + x.Schema, err = NewSchemaOrReference(v8, compiler.NewContext("schema", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // 
ExamplesOrReferences examples = 10; + v10 := compiler.MapValueForKey(m, "examples") + if v10 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v10, compiler.NewContext("examples", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // MediaTypes content = 11; + v11 := compiler.MapValueForKey(m, "content") + if v11 != nil { + var err error + x.Content, err = NewMediaTypes(v11, compiler.NewContext("content", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 12; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderOrReference creates an object of type HeaderOrReference if possible, returning an error if not. +func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrReference, error) { + errors := make([]error, 0) + x := &HeaderOrReference{} + matched := false + // Header header = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeader(m, compiler.NewContext("header", m, context)) + if matchingError == nil { + x.Oneof = &HeaderOrReference_Header{Header: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &HeaderOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid HeaderOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeadersOrReferences creates an object of type HeadersOrReferences if possible, returning an error if not. 
+func NewHeadersOrReferences(in *yaml.Node, context *compiler.Context) (*HeadersOrReferences, error) { + errors := make([]error, 0) + x := &HeadersOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeaderOrReference additional_properties = 1; + // MAP: HeaderOrReference + x.AdditionalProperties = make([]*NamedHeaderOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedHeaderOrReference{} + pair.Name = k + var err error + pair.Value, err = NewHeaderOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "summary", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 3; + v3 := compiler.MapValueForKey(m, "termsOfService") + if v3 != nil { + x.TermsOfService, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 4; + v4 := compiler.MapValueForKey(m, "contact") + if v4 != nil { + var err error + x.Contact, err = NewContact(v4, compiler.NewContext("contact", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 5; + v5 := compiler.MapValueForKey(m, "license") + if v5 != nil { + var err error + x.License, err = NewLicense(v5, compiler.NewContext("license", v5, context)) + if err != nil { + errors 
= append(errors, err) + } + } + // string version = 6; + v6 := compiler.MapValueForKey(m, "version") + if v6 != nil { + x.Version, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 7; + v7 := compiler.MapValueForKey(m, "summary") + if v7 != nil { + x.Summary, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 8; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.SchemaOrReference = make([]*SchemaOrReference, 0) + y, err := NewSchemaOrReference(m, compiler.NewContext("", m, context)) + if err != nil { + return nil, err + } + x.SchemaOrReference = append(x.SchemaOrReference, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
+func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLink creates an object of type Link if possible, returning an error if not. 
+func NewLink(in *yaml.Node, context *compiler.Context) (*Link, error) { + errors := make([]error, 0) + x := &Link{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"description", "operationId", "operationRef", "parameters", "requestBody", "server"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string operation_ref = 1; + v1 := compiler.MapValueForKey(m, "operationRef") + if v1 != nil { + x.OperationRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for operationRef: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string operation_id = 2; + v2 := compiler.MapValueForKey(m, "operationId") + if v2 != nil { + x.OperationId, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // AnyOrExpression parameters = 3; + v3 := compiler.MapValueForKey(m, "parameters") + if v3 != nil { + var err error + x.Parameters, err = NewAnyOrExpression(v3, compiler.NewContext("parameters", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // AnyOrExpression request_body = 4; + v4 := compiler.MapValueForKey(m, "requestBody") + if v4 != nil { + var err error + x.RequestBody, err = NewAnyOrExpression(v4, compiler.NewContext("requestBody", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Server server = 6; + v6 := compiler.MapValueForKey(m, "server") + if v6 != nil { + var err error + x.Server, err = NewServer(v6, compiler.NewContext("server", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 7; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLinkOrReference creates an object of type LinkOrReference if possible, returning an error if not. 
+func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrReference, error) { + errors := make([]error, 0) + x := &LinkOrReference{} + matched := false + // Link link = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewLink(m, compiler.NewContext("link", m, context)) + if matchingError == nil { + x.Oneof = &LinkOrReference_Link{Link: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &LinkOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid LinkOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLinksOrReferences creates an object of type LinksOrReferences if possible, returning an error if not. +func NewLinksOrReferences(in *yaml.Node, context *compiler.Context) (*LinksOrReferences, error) { + errors := make([]error, 0) + x := &LinksOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedLinkOrReference additional_properties = 1; + // MAP: LinkOrReference + x.AdditionalProperties = make([]*NamedLinkOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedLinkOrReference{} + pair.Name = k + var err error + pair.Value, err = NewLinkOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewMediaType creates an object of type MediaType if possible, returning an error if not. 
+func NewMediaType(in *yaml.Node, context *compiler.Context) (*MediaType, error) { + errors := make([]error, 0) + x := &MediaType{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"encoding", "example", "examples", "schema"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // SchemaOrReference schema = 1; + v1 := compiler.MapValueForKey(m, "schema") + if v1 != nil { + var err error + x.Schema, err = NewSchemaOrReference(v1, compiler.NewContext("schema", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 2; + v2 := compiler.MapValueForKey(m, "example") + if v2 != nil { + var err error + x.Example, err = NewAny(v2, compiler.NewContext("example", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // ExamplesOrReferences examples = 3; + v3 := compiler.MapValueForKey(m, "examples") + if v3 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v3, compiler.NewContext("examples", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // Encodings encoding = 4; + v4 := compiler.MapValueForKey(m, "encoding") + if v4 != nil { + var err error + x.Encoding, err = NewEncodings(v4, compiler.NewContext("encoding", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewMediaTypes creates an object of type MediaTypes if possible, returning an error if not. 
+func NewMediaTypes(in *yaml.Node, context *compiler.Context) (*MediaTypes, error) { + errors := make([]error, 0) + x := &MediaTypes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedMediaType additional_properties = 1; + // MAP: MediaType + x.AdditionalProperties = make([]*NamedMediaType, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedMediaType{} + pair.Name = k + var err error + pair.Value, err = NewMediaType(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. +func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedCallbackOrReference creates an object of type NamedCallbackOrReference if possible, returning an error if not. 
+func NewNamedCallbackOrReference(in *yaml.Node, context *compiler.Context) (*NamedCallbackOrReference, error) { + errors := make([]error, 0) + x := &NamedCallbackOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // CallbackOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewCallbackOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedEncoding creates an object of type NamedEncoding if possible, returning an error if not. +func NewNamedEncoding(in *yaml.Node, context *compiler.Context) (*NamedEncoding, error) { + errors := make([]error, 0) + x := &NamedEncoding{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Encoding value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewEncoding(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedExampleOrReference creates an object of type NamedExampleOrReference if possible, returning an error if not. 
+func NewNamedExampleOrReference(in *yaml.Node, context *compiler.Context) (*NamedExampleOrReference, error) { + errors := make([]error, 0) + x := &NamedExampleOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExampleOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewExampleOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeaderOrReference creates an object of type NamedHeaderOrReference if possible, returning an error if not. +func NewNamedHeaderOrReference(in *yaml.Node, context *compiler.Context) (*NamedHeaderOrReference, error) { + errors := make([]error, 0) + x := &NamedHeaderOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // HeaderOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeaderOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedLinkOrReference creates an object of type NamedLinkOrReference if possible, returning an error if not. 
+func NewNamedLinkOrReference(in *yaml.Node, context *compiler.Context) (*NamedLinkOrReference, error) { + errors := make([]error, 0) + x := &NamedLinkOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // LinkOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewLinkOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedMediaType creates an object of type NamedMediaType if possible, returning an error if not. +func NewNamedMediaType(in *yaml.Node, context *compiler.Context) (*NamedMediaType, error) { + errors := make([]error, 0) + x := &NamedMediaType{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // MediaType value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewMediaType(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameterOrReference creates an object of type NamedParameterOrReference if possible, returning an error if not. 
+func NewNamedParameterOrReference(in *yaml.Node, context *compiler.Context) (*NamedParameterOrReference, error) { + errors := make([]error, 0) + x := &NamedParameterOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ParameterOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameterOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedRequestBodyOrReference creates an object of type NamedRequestBodyOrReference if possible, returning an error if not. 
+func NewNamedRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*NamedRequestBodyOrReference, error) { + errors := make([]error, 0) + x := &NamedRequestBodyOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // RequestBodyOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewRequestBodyOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseOrReference creates an object of type NamedResponseOrReference if possible, returning an error if not. +func NewNamedResponseOrReference(in *yaml.Node, context *compiler.Context) (*NamedResponseOrReference, error) { + errors := make([]error, 0) + x := &NamedResponseOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchemaOrReference creates an object of type NamedSchemaOrReference if possible, returning an error if not. 
+func NewNamedSchemaOrReference(in *yaml.Node, context *compiler.Context) (*NamedSchemaOrReference, error) { + errors := make([]error, 0) + x := &NamedSchemaOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchemaOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecuritySchemeOrReference creates an object of type NamedSecuritySchemeOrReference if possible, returning an error if not. +func NewNamedSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*NamedSecuritySchemeOrReference, error) { + errors := make([]error, 0) + x := &NamedSecuritySchemeOrReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecuritySchemeOrReference value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecuritySchemeOrReference(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedServerVariable creates an object of type NamedServerVariable if possible, returning an error if not. 
+func NewNamedServerVariable(in *yaml.Node, context *compiler.Context) (*NamedServerVariable, error) { + errors := make([]error, 0) + x := &NamedServerVariable{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ServerVariable value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewServerVariable(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. +func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. 
+func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauthFlow creates an object of type OauthFlow if possible, returning an error if not. +func NewOauthFlow(in *yaml.Node, context *compiler.Context) (*OauthFlow, error) { + errors := make([]error, 0) + x := &OauthFlow{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"authorizationUrl", "refreshUrl", "scopes", "tokenUrl"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string authorization_url = 1; + v1 := compiler.MapValueForKey(m, "authorizationUrl") + if v1 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 2; + v2 := compiler.MapValueForKey(m, "tokenUrl") + if v2 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string refresh_url = 3; + v3 := compiler.MapValueForKey(m, "refreshUrl") + if v3 != nil { + x.RefreshUrl, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for refreshUrl: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Strings scopes = 4; + v4 := compiler.MapValueForKey(m, "scopes") + if v4 != nil { + var err error + x.Scopes, err = NewStrings(v4, compiler.NewContext("scopes", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := 
compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauthFlows creates an object of type OauthFlows if possible, returning an error if not. +func NewOauthFlows(in *yaml.Node, context *compiler.Context) (*OauthFlows, error) { + errors := make([]error, 0) + x := &OauthFlows{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"authorizationCode", "clientCredentials", "implicit", "password"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // OauthFlow implicit = 1; + v1 := compiler.MapValueForKey(m, "implicit") + if v1 != nil { + var err error + x.Implicit, err = NewOauthFlow(v1, compiler.NewContext("implicit", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // OauthFlow password = 2; + v2 := compiler.MapValueForKey(m, "password") + if v2 != nil { + var err error + x.Password, err = NewOauthFlow(v2, compiler.NewContext("password", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // OauthFlow client_credentials = 3; + v3 := compiler.MapValueForKey(m, "clientCredentials") + if v3 != nil { + var err error + x.ClientCredentials, err = NewOauthFlow(v3, compiler.NewContext("clientCredentials", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // OauthFlow authorization_code = 4; + v4 := compiler.MapValueForKey(m, "authorizationCode") + if v4 != nil { + var err error + x.AuthorizationCode, err = NewOauthFlow(v4, compiler.NewContext("authorizationCode", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewObject creates an object of type Object if 
possible, returning an error if not. +func NewObject(in *yaml.Node, context *compiler.Context) (*Object, error) { + errors := make([]error, 0) + x := &Object{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. +func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"callbacks", "deprecated", "description", "externalDocs", "operationId", "parameters", "requestBody", "responses", "security", "servers", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := compiler.SequenceNodeForNode(v1) + if ok { + x.Tags = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + 
x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParameterOrReference parameters = 6; + v6 := compiler.MapValueForKey(m, "parameters") + if v6 != nil { + // repeated ParameterOrReference + x.Parameters = make([]*ParameterOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v6) + if ok { + for _, item := range a.Content { + y, err := NewParameterOrReference(item, compiler.NewContext("parameters", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // RequestBodyOrReference request_body = 7; + v7 := compiler.MapValueForKey(m, "requestBody") + if v7 != nil { + var err error + x.RequestBody, err = NewRequestBodyOrReference(v7, compiler.NewContext("requestBody", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // Responses responses = 8; + v8 := compiler.MapValueForKey(m, "responses") + if v8 != nil { + var err error + x.Responses, err = NewResponses(v8, compiler.NewContext("responses", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // CallbacksOrReferences callbacks = 9; + v9 := compiler.MapValueForKey(m, "callbacks") + if v9 != nil { + var err error + x.Callbacks, err = NewCallbacksOrReferences(v9, compiler.NewContext("callbacks", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool deprecated = 10; + v10 := compiler.MapValueForKey(m, "deprecated") + if v10 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v10) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 11; + v11 := compiler.MapValueForKey(m, "security") + if v11 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v11) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated Server servers = 12; + v12 := compiler.MapValueForKey(m, "servers") + if v12 != nil { + // repeated Server + x.Servers = make([]*Server, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewServer(item, compiler.NewContext("servers", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Servers = append(x.Servers, y) + } + } + } + // repeated NamedAny specification_extension = 13; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + 
result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. +func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"allowEmptyValue", "allowReserved", "content", "deprecated", "description", "example", "examples", "explode", "in", "name", "required", "schema", "style"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 5; + v5 := compiler.MapValueForKey(m, "deprecated") + if v5 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 6; + v6 := compiler.MapValueForKey(m, "allowEmptyValue") + if v6 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string style = 7; + v7 := compiler.MapValueForKey(m, "style") + if v7 != nil { + 
x.Style, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for style: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool explode = 8; + v8 := compiler.MapValueForKey(m, "explode") + if v8 != nil { + x.Explode, ok = compiler.BoolForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for explode: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_reserved = 9; + v9 := compiler.MapValueForKey(m, "allowReserved") + if v9 != nil { + x.AllowReserved, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for allowReserved: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaOrReference schema = 10; + v10 := compiler.MapValueForKey(m, "schema") + if v10 != nil { + var err error + x.Schema, err = NewSchemaOrReference(v10, compiler.NewContext("schema", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 11; + v11 := compiler.MapValueForKey(m, "example") + if v11 != nil { + var err error + x.Example, err = NewAny(v11, compiler.NewContext("example", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // ExamplesOrReferences examples = 12; + v12 := compiler.MapValueForKey(m, "examples") + if v12 != nil { + var err error + x.Examples, err = NewExamplesOrReferences(v12, compiler.NewContext("examples", v12, context)) + if err != nil { + errors = append(errors, err) + } + } + // MediaTypes content = 13; + v13 := compiler.MapValueForKey(m, "content") + if v13 != nil { + var err error + x.Content, err = NewMediaTypes(v13, compiler.NewContext("content", v13, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 14; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterOrReference creates an object of type ParameterOrReference if possible, returning an error if not. 
+func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*ParameterOrReference, error) { + errors := make([]error, 0) + x := &ParameterOrReference{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context)) + if matchingError == nil { + x.Oneof = &ParameterOrReference_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &ParameterOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ParameterOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersOrReferences creates an object of type ParametersOrReferences if possible, returning an error if not. +func NewParametersOrReferences(in *yaml.Node, context *compiler.Context) (*ParametersOrReferences, error) { + errors := make([]error, 0) + x := &ParametersOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameterOrReference additional_properties = 1; + // MAP: ParameterOrReference + x.AdditionalProperties = make([]*NamedParameterOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedParameterOrReference{} + pair.Name = k + var err error + pair.Value, err = NewParameterOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. 
+func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "description", "get", "head", "options", "parameters", "patch", "post", "put", "servers", "summary", "trace"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 4; + v4 := compiler.MapValueForKey(m, "get") + if v4 != nil { + var err error + x.Get, err = NewOperation(v4, compiler.NewContext("get", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 5; + v5 := compiler.MapValueForKey(m, "put") + if v5 != nil { + var err error + x.Put, err = NewOperation(v5, compiler.NewContext("put", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 6; + v6 := compiler.MapValueForKey(m, "post") + if v6 != nil { + var err error + x.Post, err = NewOperation(v6, compiler.NewContext("post", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 7; + v7 := compiler.MapValueForKey(m, "delete") + if v7 != nil { + var err error + x.Delete, err = NewOperation(v7, compiler.NewContext("delete", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 8; + v8 := compiler.MapValueForKey(m, "options") + if v8 != nil { + var err error + x.Options, err = NewOperation(v8, compiler.NewContext("options", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 9; + v9 := compiler.MapValueForKey(m, "head") + if v9 != nil { + var err error + x.Head, err = NewOperation(v9, compiler.NewContext("head", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 10; + v10 := compiler.MapValueForKey(m, "patch") + if v10 != nil { + var err error + x.Patch, err = NewOperation(v10, compiler.NewContext("patch", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation trace = 11; + v11 := compiler.MapValueForKey(m, "trace") + if v11 != nil { + var err error + x.Trace, err = NewOperation(v11, 
compiler.NewContext("trace", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Server servers = 12; + v12 := compiler.MapValueForKey(m, "servers") + if v12 != nil { + // repeated Server + x.Servers = make([]*Server, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewServer(item, compiler.NewContext("servers", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Servers = append(x.Servers, y) + } + } + } + // repeated ParameterOrReference parameters = 13; + v13 := compiler.MapValueForKey(m, "parameters") + if v13 != nil { + // repeated ParameterOrReference + x.Parameters = make([]*ParameterOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v13) + if ok { + for _, item := range a.Content { + y, err := NewParameterOrReference(item, compiler.NewContext("parameters", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny specification_extension = 14; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. 
+func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedPathItem path = 1; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + // repeated NamedAny specification_extension = 2; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchemaOrReference additional_properties = 1; + // MAP: SchemaOrReference + x.AdditionalProperties = make([]*NamedSchemaOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchemaOrReference{} + pair.Name = k + var err error + pair.Value, err = NewSchemaOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewReference creates an object of type Reference if possible, returning an error if not. 
+func NewReference(in *yaml.Node, context *compiler.Context) (*Reference, error) { + errors := make([]error, 0) + x := &Reference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewRequestBodiesOrReferences creates an object of type RequestBodiesOrReferences if possible, returning an error if not. +func NewRequestBodiesOrReferences(in *yaml.Node, context *compiler.Context) (*RequestBodiesOrReferences, error) { + errors := make([]error, 0) + x := &RequestBodiesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedRequestBodyOrReference additional_properties = 1; + // MAP: RequestBodyOrReference + x.AdditionalProperties = make([]*NamedRequestBodyOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedRequestBodyOrReference{} + pair.Name = k + var err error + pair.Value, err = NewRequestBodyOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewRequestBody creates an object of type RequestBody if possible, returning an error if not. 
+func NewRequestBody(in *yaml.Node, context *compiler.Context) (*RequestBody, error) { + errors := make([]error, 0) + x := &RequestBody{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"content"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"content", "description", "required"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // MediaTypes content = 2; + v2 := compiler.MapValueForKey(m, "content") + if v2 != nil { + var err error + x.Content, err = NewMediaTypes(v2, compiler.NewContext("content", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool required = 3; + v3 := compiler.MapValueForKey(m, "required") + if v3 != nil { + x.Required, ok = compiler.BoolForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewRequestBodyOrReference creates an object of type RequestBodyOrReference if possible, returning an error if not. 
+func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*RequestBodyOrReference, error) { + errors := make([]error, 0) + x := &RequestBodyOrReference{} + matched := false + // RequestBody request_body = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewRequestBody(m, compiler.NewContext("requestBody", m, context)) + if matchingError == nil { + x.Oneof = &RequestBodyOrReference_RequestBody{RequestBody: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &RequestBodyOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid RequestBodyOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. +func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"content", "description", "headers", "links"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // HeadersOrReferences headers = 2; + v2 := compiler.MapValueForKey(m, "headers") + if v2 != nil { + var err error + x.Headers, err = NewHeadersOrReferences(v2, compiler.NewContext("headers", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // MediaTypes content = 3; + v3 := compiler.MapValueForKey(m, "content") + if v3 != nil { + var err error + x.Content, err = NewMediaTypes(v3, compiler.NewContext("content", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // LinksOrReferences links = 4; + v4 := compiler.MapValueForKey(m, "links") + if v4 != nil { + var err error + x.Links, err = NewLinksOrReferences(v4, compiler.NewContext("links", v4, context)) + if err != nil 
{ + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 5; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseOrReference creates an object of type ResponseOrReference if possible, returning an error if not. +func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*ResponseOrReference, error) { + errors := make([]error, 0) + x := &ResponseOrReference{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", m, context)) + if matchingError == nil { + x.Oneof = &ResponseOrReference_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &ResponseOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ResponseOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
+func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"default"} + allowedPatterns := []*regexp.Regexp{pattern3, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // ResponseOrReference default = 1; + v1 := compiler.MapValueForKey(m, "default") + if v1 != nil { + var err error + x.Default, err = NewResponseOrReference(v1, compiler.NewContext("default", v1, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedResponseOrReference response_or_reference = 2; + // MAP: ResponseOrReference ^([0-9X]{3})$ + x.ResponseOrReference = make([]*NamedResponseOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if pattern3.MatchString(k) { + pair := &NamedResponseOrReference{} + pair.Name = k + var err error + pair.Value, err = NewResponseOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseOrReference = append(x.ResponseOrReference, pair) + } + } + } + // repeated NamedAny specification_extension = 3; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponsesOrReferences creates an object of type ResponsesOrReferences if possible, returning an error if not. 
+func NewResponsesOrReferences(in *yaml.Node, context *compiler.Context) (*ResponsesOrReferences, error) { + errors := make([]error, 0) + x := &ResponsesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponseOrReference additional_properties = 1; + // MAP: ResponseOrReference + x.AdditionalProperties = make([]*NamedResponseOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedResponseOrReference{} + pair.Name = k + var err error + pair.Value, err = NewResponseOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. +func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"additionalProperties", "allOf", "anyOf", "default", "deprecated", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "not", "nullable", "oneOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "writeOnly", "xml"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool nullable = 1; + v1 := compiler.MapValueForKey(m, "nullable") + if v1 != nil { + x.Nullable, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for nullable: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Discriminator discriminator = 2; + v2 := compiler.MapValueForKey(m, "discriminator") + if v2 != nil { + var err error + x.Discriminator, err = NewDiscriminator(v2, compiler.NewContext("discriminator", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool read_only = 3; + v3 := compiler.MapValueForKey(m, "readOnly") + if v3 != nil { + x.ReadOnly, ok = compiler.BoolForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool write_only = 4; + v4 := compiler.MapValueForKey(m, "writeOnly") + if v4 != nil { + x.WriteOnly, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for writeOnly: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 5; + v5 := compiler.MapValueForKey(m, "xml") + if v5 != nil { + var err error + x.Xml, err = NewXml(v5, compiler.NewContext("xml", v5, context)) + 
if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 6; + v6 := compiler.MapValueForKey(m, "externalDocs") + if v6 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v6, compiler.NewContext("externalDocs", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 7; + v7 := compiler.MapValueForKey(m, "example") + if v7 != nil { + var err error + x.Example, err = NewAny(v7, compiler.NewContext("example", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // bool deprecated = 8; + v8 := compiler.MapValueForKey(m, "deprecated") + if v8 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 9; + v9 := compiler.MapValueForKey(m, "title") + if v9 != nil { + x.Title, ok = compiler.StringForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float multiple_of = 10; + v10 := compiler.MapValueForKey(m, "multipleOf") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := compiler.IntForScalarNode(v16) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } 
+ } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v17) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := compiler.IntForScalarNode(v19) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 21; + v21 := compiler.MapValueForKey(m, "maxProperties") + if v21 != nil { + t, ok := compiler.IntForScalarNode(v21) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 22; + v22 := compiler.MapValueForKey(m, "minProperties") + if v22 != nil { + t, ok := compiler.IntForScalarNode(v22) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v22)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 23; + v23 := compiler.MapValueForKey(m, "required") + if v23 != nil { + v, ok := compiler.SequenceNodeForNode(v23) + if ok { + x.Required = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v23)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 24; + v24 := compiler.MapValueForKey(m, "enum") + if v24 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v24) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // string type = 25; + v25 := compiler.MapValueForKey(m, "type") + if v25 != nil { + x.Type, ok = compiler.StringForScalarNode(v25) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v25)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SchemaOrReference all_of = 26; + v26 := compiler.MapValueForKey(m, "allOf") + if v26 != nil { + // repeated SchemaOrReference + x.AllOf = make([]*SchemaOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v26) + if ok { + for _, item := range a.Content { + y, err := NewSchemaOrReference(item, compiler.NewContext("allOf", item, context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) 
+ } + } + } + // repeated SchemaOrReference one_of = 27; + v27 := compiler.MapValueForKey(m, "oneOf") + if v27 != nil { + // repeated SchemaOrReference + x.OneOf = make([]*SchemaOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v27) + if ok { + for _, item := range a.Content { + y, err := NewSchemaOrReference(item, compiler.NewContext("oneOf", item, context)) + if err != nil { + errors = append(errors, err) + } + x.OneOf = append(x.OneOf, y) + } + } + } + // repeated SchemaOrReference any_of = 28; + v28 := compiler.MapValueForKey(m, "anyOf") + if v28 != nil { + // repeated SchemaOrReference + x.AnyOf = make([]*SchemaOrReference, 0) + a, ok := compiler.SequenceNodeForNode(v28) + if ok { + for _, item := range a.Content { + y, err := NewSchemaOrReference(item, compiler.NewContext("anyOf", item, context)) + if err != nil { + errors = append(errors, err) + } + x.AnyOf = append(x.AnyOf, y) + } + } + } + // Schema not = 29; + v29 := compiler.MapValueForKey(m, "not") + if v29 != nil { + var err error + x.Not, err = NewSchema(v29, compiler.NewContext("not", v29, context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 30; + v30 := compiler.MapValueForKey(m, "items") + if v30 != nil { + var err error + x.Items, err = NewItemsItem(v30, compiler.NewContext("items", v30, context)) + if err != nil { + errors = append(errors, err) + } + } + // Properties properties = 31; + v31 := compiler.MapValueForKey(m, "properties") + if v31 != nil { + var err error + x.Properties, err = NewProperties(v31, compiler.NewContext("properties", v31, context)) + if err != nil { + errors = append(errors, err) + } + } + // AdditionalPropertiesItem additional_properties = 32; + v32 := compiler.MapValueForKey(m, "additionalProperties") + if v32 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v32, compiler.NewContext("additionalProperties", v32, context)) + if err != nil { + errors = append(errors, err) + } + } + // DefaultType default = 33; + v33 := compiler.MapValueForKey(m, "default") + if v33 != nil { + var err error + x.Default, err = NewDefaultType(v33, compiler.NewContext("default", v33, context)) + if err != nil { + errors = append(errors, err) + } + } + // string description = 34; + v34 := compiler.MapValueForKey(m, "description") + if v34 != nil { + x.Description, ok = compiler.StringForScalarNode(v34) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v34)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 35; + v35 := compiler.MapValueForKey(m, "format") + if v35 != nil { + x.Format, ok = compiler.StringForScalarNode(v35) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v35)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 36; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, 
context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaOrReference creates an object of type SchemaOrReference if possible, returning an error if not. +func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrReference, error) { + errors := make([]error, 0) + x := &SchemaOrReference{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) + if matchingError == nil { + x.Oneof = &SchemaOrReference_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &SchemaOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid SchemaOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemasOrReferences creates an object of type SchemasOrReferences if possible, returning an error if not. +func NewSchemasOrReferences(in *yaml.Node, context *compiler.Context) (*SchemasOrReferences, error) { + errors := make([]error, 0) + x := &SchemasOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchemaOrReference additional_properties = 1; + // MAP: SchemaOrReference + x.AdditionalProperties = make([]*NamedSchemaOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchemaOrReference{} + pair.Name = k + var err error + pair.Value, err = NewSchemaOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
+func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityScheme creates an object of type SecurityScheme if possible, returning an error if not. +func NewSecurityScheme(in *yaml.Node, context *compiler.Context) (*SecurityScheme, error) { + errors := make([]error, 0) + x := &SecurityScheme{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"bearerFormat", "description", "flows", "in", "name", "openIdConnectUrl", "scheme", "type"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 3; + v3 := compiler.MapValueForKey(m, "name") + if v3 != nil { + x.Name, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 4; + v4 := compiler.MapValueForKey(m, "in") + if v4 != nil { + x.In, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string scheme = 5; + v5 := compiler.MapValueForKey(m, "scheme") + if v5 != nil { + x.Scheme, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value 
for scheme: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string bearer_format = 6; + v6 := compiler.MapValueForKey(m, "bearerFormat") + if v6 != nil { + x.BearerFormat, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for bearerFormat: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // OauthFlows flows = 7; + v7 := compiler.MapValueForKey(m, "flows") + if v7 != nil { + var err error + x.Flows, err = NewOauthFlows(v7, compiler.NewContext("flows", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // string open_id_connect_url = 8; + v8 := compiler.MapValueForKey(m, "openIdConnectUrl") + if v8 != nil { + x.OpenIdConnectUrl, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for openIdConnectUrl: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 9; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecuritySchemeOrReference creates an object of type SecuritySchemeOrReference if possible, returning an error if not. +func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*SecuritySchemeOrReference, error) { + errors := make([]error, 0) + x := &SecuritySchemeOrReference{} + matched := false + // SecurityScheme security_scheme = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSecurityScheme(m, compiler.NewContext("securityScheme", m, context)) + if matchingError == nil { + x.Oneof = &SecuritySchemeOrReference_SecurityScheme{SecurityScheme: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Reference reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewReference(m, compiler.NewContext("reference", m, context)) + if matchingError == nil { + x.Oneof = &SecuritySchemeOrReference_Reference{Reference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecuritySchemesOrReferences creates an object of type SecuritySchemesOrReferences if possible, returning an error if not. 
+func NewSecuritySchemesOrReferences(in *yaml.Node, context *compiler.Context) (*SecuritySchemesOrReferences, error) { + errors := make([]error, 0) + x := &SecuritySchemesOrReferences{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecuritySchemeOrReference additional_properties = 1; + // MAP: SecuritySchemeOrReference + x.AdditionalProperties = make([]*NamedSecuritySchemeOrReference, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSecuritySchemeOrReference{} + pair.Name = k + var err error + pair.Value, err = NewSecuritySchemeOrReference(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewServer creates an object of type Server if possible, returning an error if not. +func NewServer(in *yaml.Node, context *compiler.Context) (*Server, error) { + errors := make([]error, 0) + x := &Server{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url", "variables"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string url = 1; + v1 := compiler.MapValueForKey(m, "url") + if v1 != nil { + x.Url, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ServerVariables variables = 3; + v3 := compiler.MapValueForKey(m, "variables") + if v3 != nil { + var err error + x.Variables, err = NewServerVariables(v3, compiler.NewContext("variables", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + 
result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewServerVariable creates an object of type ServerVariable if possible, returning an error if not. +func NewServerVariable(in *yaml.Node, context *compiler.Context) (*ServerVariable, error) { + errors := make([]error, 0) + x := &ServerVariable{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"default"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "enum"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string enum = 1; + v1 := compiler.MapValueForKey(m, "enum") + if v1 != nil { + v, ok := compiler.SequenceNodeForNode(v1) + if ok { + x.Enum = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for enum: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string default = 2; + v2 := compiler.MapValueForKey(m, "default") + if v2 != nil { + x.Default, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for default: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewServerVariables creates an object of type ServerVariables if possible, returning an error if not. 
+func NewServerVariables(in *yaml.Node, context *compiler.Context) (*ServerVariables, error) { + errors := make([]error, 0) + x := &ServerVariables{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedServerVariable additional_properties = 1; + // MAP: ServerVariable + x.AdditionalProperties = make([]*NamedServerVariable, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedServerVariable{} + pair.Name = k + var err error + pair.Value, err = NewServerVariable(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSpecificationExtension creates an object of type SpecificationExtension if possible, returning an error if not. +func NewSpecificationExtension(in *yaml.Node, context *compiler.Context) (*SpecificationExtension, error) { + errors := make([]error, 0) + x := &SpecificationExtension{} + matched := false + switch in.Tag { + case "!!bool": + var v bool + v, matched = compiler.BoolForScalarNode(in) + x.Oneof = &SpecificationExtension_Boolean{Boolean: v} + case "!!str": + var v string + v, matched = compiler.StringForScalarNode(in) + x.Oneof = &SpecificationExtension_String_{String_: v} + case "!!float": + var v float64 + v, matched = compiler.FloatForScalarNode(in) + x.Oneof = &SpecificationExtension_Number{Number: v} + case "!!int": + var v int64 + v, matched = compiler.IntForScalarNode(in) + x.Oneof = &SpecificationExtension_Number{Number: float64(v)} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + x.Value = make([]string, 0) + for _, node := range in.Content { + s, _ := compiler.StringForScalarNode(node) + x.Value = append(x.Value, s) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStrings creates an object of type Strings if possible, returning an error if not. +func NewStrings(in *yaml.Node, context *compiler.Context) (*Strings, error) { + errors := make([]error, 0) + x := &Strings{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedString{} + pair.Name = k + pair.Value, _ = compiler.StringForScalarNode(v) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. 
+func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny specification_extension = 4; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
+func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny specification_extension = 6; + // MAP: Any ^x- + x.SpecificationExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.SpecificationExtension = append(x.SpecificationExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_SchemaOrReference) + if ok { + _, err := p.SchemaOrReference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. +func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AnyOrExpression objects. +func (m *AnyOrExpression) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AnyOrExpression_Any) + if ok { + _, err := p.Any.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*AnyOrExpression_Expression) + if ok { + _, err := p.Expression.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Callback objects. +func (m *Callback) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside CallbackOrReference objects. +func (m *CallbackOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*CallbackOrReference_Callback) + if ok { + _, err := p.Callback.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*CallbackOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside CallbacksOrReferences objects. +func (m *CallbacksOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Components objects. 
+func (m *Components) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schemas != nil { + _, err := m.Schemas.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.RequestBodies != nil { + _, err := m.RequestBodies.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.SecuritySchemes != nil { + _, err := m.SecuritySchemes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Links != nil { + _, err := m.Links.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Callbacks != nil { + _, err := m.Callbacks.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside DefaultType objects. +func (m *DefaultType) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Discriminator objects. +func (m *Discriminator) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Mapping != nil { + _, err := m.Mapping.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Servers { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Components != nil { + _, err := m.Components.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Encoding objects. +func (m *Encoding) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Encodings objects. +func (m *Encodings) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Example objects. +func (m *Example) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExampleOrReference objects. +func (m *ExampleOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ExampleOrReference_Example) + if ok { + _, err := p.Example.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ExampleOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExamplesOrReferences objects. 
+func (m *ExamplesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Expression objects. +func (m *Expression) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderOrReference objects. +func (m *HeaderOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*HeaderOrReference_Header) + if ok { + _, err := p.Header.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*HeaderOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeadersOrReferences objects. +func (m *HeadersOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. 
+func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. +func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SchemaOrReference { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Link objects. +func (m *Link) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.RequestBody != nil { + _, err := m.RequestBody.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Server != nil { + _, err := m.Server.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside LinkOrReference objects. +func (m *LinkOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*LinkOrReference_Link) + if ok { + _, err := p.Link.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*LinkOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside LinksOrReferences objects. +func (m *LinksOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside MediaType objects. 
+func (m *MediaType) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Encoding != nil { + _, err := m.Encoding.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside MediaTypes objects. +func (m *MediaTypes) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedCallbackOrReference objects. +func (m *NamedCallbackOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedEncoding objects. +func (m *NamedEncoding) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedExampleOrReference objects. +func (m *NamedExampleOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeaderOrReference objects. +func (m *NamedHeaderOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedLinkOrReference objects. 
+func (m *NamedLinkOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedMediaType objects. +func (m *NamedMediaType) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameterOrReference objects. +func (m *NamedParameterOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedRequestBodyOrReference objects. +func (m *NamedRequestBodyOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseOrReference objects. +func (m *NamedResponseOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchemaOrReference objects. +func (m *NamedSchemaOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecuritySchemeOrReference objects. +func (m *NamedSecuritySchemeOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedServerVariable objects. +func (m *NamedServerVariable) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. 
+func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside OauthFlow objects. +func (m *OauthFlow) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside OauthFlows objects. +func (m *OauthFlows) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Implicit != nil { + _, err := m.Implicit.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Password != nil { + _, err := m.Password.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ClientCredentials != nil { + _, err := m.ClientCredentials.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.AuthorizationCode != nil { + _, err := m.AuthorizationCode.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Object objects. +func (m *Object) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. 
+func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.RequestBody != nil { + _, err := m.RequestBody.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Callbacks != nil { + _, err := m.Callbacks.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Servers { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterOrReference objects. +func (m *ParameterOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParameterOrReference_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParameterOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersOrReferences objects. +func (m *ParametersOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. 
+func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Trace != nil { + _, err := m.Trace.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Servers { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. +func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Reference objects. +func (m *Reference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside RequestBodiesOrReferences objects. 
+func (m *RequestBodiesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside RequestBody objects. +func (m *RequestBody) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside RequestBodyOrReference objects. +func (m *RequestBodyOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*RequestBodyOrReference_RequestBody) + if ok { + _, err := p.RequestBody.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*RequestBodyOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Content != nil { + _, err := m.Content.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Links != nil { + _, err := m.Links.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseOrReference objects. +func (m *ResponseOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseOrReference_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. 
+func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.ResponseOrReference { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponsesOrReferences objects. +func (m *ResponsesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Discriminator != nil { + _, err := m.Discriminator.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.OneOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.AnyOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Not != nil { + _, err := m.Not.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaOrReference objects. 
+func (m *SchemaOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaOrReference_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemasOrReferences objects. +func (m *SchemasOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityScheme objects. +func (m *SecurityScheme) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Flows != nil { + _, err := m.Flows.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecuritySchemeOrReference objects. +func (m *SecuritySchemeOrReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecuritySchemeOrReference_SecurityScheme) + if ok { + _, err := p.SecurityScheme.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecuritySchemeOrReference_Reference) + if ok { + _, err := p.Reference.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecuritySchemesOrReferences objects. +func (m *SecuritySchemesOrReferences) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Server objects. +func (m *Server) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Variables != nil { + _, err := m.Variables.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ServerVariable objects. 
+func (m *ServerVariable) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ServerVariables objects. +func (m *ServerVariables) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SpecificationExtension objects. +func (m *SpecificationExtension) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Strings objects. +func (m *Strings) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. +func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.SpecificationExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schemaOrReference Type:SchemaOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchemaOrReference() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. 
+func (m *Any) ToRawInfo() *yaml.Node { + var err error + var node yaml.Node + err = yaml.Unmarshal([]byte(m.Yaml), &node) + if err == nil { + if node.Kind == yaml.DocumentNode { + return node.Content[0] + } + return &node + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of AnyOrExpression suitable for JSON or YAML export. +func (m *AnyOrExpression) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // AnyOrExpression + // {Name:any Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetAny() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:expression Type:Expression StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetExpression() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Callback suitable for JSON or YAML export. +func (m *Callback) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Path != nil { + for _, item := range m.Path { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of CallbackOrReference suitable for JSON or YAML export. +func (m *CallbackOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // CallbackOrReference + // {Name:callback Type:Callback StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetCallback() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of CallbacksOrReferences suitable for JSON or YAML export. +func (m *CallbacksOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Components suitable for JSON or YAML export. 
+func (m *Components) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Schemas != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemas")) + info.Content = append(info.Content, m.Schemas.ToRawInfo()) + } + if m.Responses != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + } + if m.Parameters != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.RequestBodies != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("requestBodies")) + info.Content = append(info.Content, m.RequestBodies.ToRawInfo()) + } + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.SecuritySchemes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("securitySchemes")) + info.Content = append(info.Content, m.SecuritySchemes.ToRawInfo()) + } + if m.Links != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("links")) + info.Content = append(info.Content, m.Links.ToRawInfo()) + } + if m.Callbacks != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("callbacks")) + info.Content = append(info.Content, m.Callbacks.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.Email != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("email")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of DefaultType suitable for JSON or YAML export. 
+func (m *DefaultType) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // DefaultType + // {Name:number Type:float StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v0, ok := m.GetOneof().(*DefaultType_Number); ok { + return compiler.NewScalarNodeForFloat(v0.Number) + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*DefaultType_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + // {Name:string Type:string StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v2, ok := m.GetOneof().(*DefaultType_String_); ok { + return compiler.NewScalarNodeForString(v2.String_) + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Discriminator suitable for JSON or YAML export. +func (m *Discriminator) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("propertyName")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.PropertyName)) + if m.Mapping != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("mapping")) + info.Content = append(info.Content, m.Mapping.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. +func (m *Document) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("openapi")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Openapi)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("info")) + info.Content = append(info.Content, m.Info.ToRawInfo()) + if len(m.Servers) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Servers { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("servers")) + info.Content = append(info.Content, items) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("paths")) + info.Content = append(info.Content, m.Paths.ToRawInfo()) + if m.Components != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("components")) + info.Content = append(info.Content, m.Components.ToRawInfo()) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if len(m.Tags) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Tags { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, items) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Encoding suitable for JSON or YAML export. +func (m *Encoding) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.ContentType != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("contentType")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.ContentType)) + } + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.Style != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("style")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Style)) + } + if m.Explode != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("explode")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Explode)) + } + if m.AllowReserved != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowReserved")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowReserved)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Encodings suitable for JSON or YAML export. +func (m *Encodings) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Example suitable for JSON or YAML export. 
+func (m *Example) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Value != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, m.Value.ToRawInfo()) + } + if m.ExternalValue != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.ExternalValue)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ExampleOrReference suitable for JSON or YAML export. +func (m *ExampleOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ExampleOrReference + // {Name:example Type:Example StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetExample() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of ExamplesOrReferences suitable for JSON or YAML export. +func (m *ExamplesOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Expression suitable for JSON or YAML export. +func (m *Expression) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. +func (m *Header) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Style != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("style")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Style)) + } + if m.Explode != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("explode")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Explode)) + } + if m.AllowReserved != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowReserved")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowReserved)) + } + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.Content != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of HeaderOrReference suitable for JSON or YAML export. 
+func (m *HeaderOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // HeaderOrReference + // {Name:header Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeader() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of HeadersOrReferences suitable for JSON or YAML export. +func (m *HeadersOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. +func (m *Info) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.TermsOfService != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService)) + } + if m.Contact != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("contact")) + info.Content = append(info.Content, m.Contact.ToRawInfo()) + } + if m.License != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("license")) + info.Content = append(info.Content, m.License.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("version")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version)) + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.SchemaOrReference) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.SchemaOrReference { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemaOrReference")) + info.Content = append(info.Content, items) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. 
+func (m *License) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Link suitable for JSON or YAML export. +func (m *Link) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.OperationRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationRef")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationRef)) + } + if m.OperationId != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) + } + if m.Parameters != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) + } + if m.RequestBody != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("requestBody")) + info.Content = append(info.Content, m.RequestBody.ToRawInfo()) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Server != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("server")) + info.Content = append(info.Content, m.Server.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of LinkOrReference suitable for JSON or YAML export. +func (m *LinkOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // LinkOrReference + // {Name:link Type:Link StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetLink() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of LinksOrReferences suitable for JSON or YAML export. +func (m *LinksOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of MediaType suitable for JSON or YAML export. 
+func (m *MediaType) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.Encoding != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("encoding")) + info.Content = append(info.Content, m.Encoding.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of MediaTypes suitable for JSON or YAML export. +func (m *MediaTypes) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Value != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, m.Value.ToRawInfo()) + } + return info +} + +// ToRawInfo returns a description of NamedCallbackOrReference suitable for JSON or YAML export. +func (m *NamedCallbackOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:CallbackOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedEncoding suitable for JSON or YAML export. +func (m *NamedEncoding) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Encoding StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedExampleOrReference suitable for JSON or YAML export. 
+func (m *NamedExampleOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ExampleOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeaderOrReference suitable for JSON or YAML export. +func (m *NamedHeaderOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:HeaderOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedLinkOrReference suitable for JSON or YAML export. +func (m *NamedLinkOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:LinkOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedMediaType suitable for JSON or YAML export. +func (m *NamedMediaType) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:MediaType StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameterOrReference suitable for JSON or YAML export. +func (m *NamedParameterOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ParameterOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedRequestBodyOrReference suitable for JSON or YAML export. 
+func (m *NamedRequestBodyOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:RequestBodyOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseOrReference suitable for JSON or YAML export. +func (m *NamedResponseOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ResponseOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchemaOrReference suitable for JSON or YAML export. +func (m *NamedSchemaOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:SchemaOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecuritySchemeOrReference suitable for JSON or YAML export. +func (m *NamedSecuritySchemeOrReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:SecuritySchemeOrReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedServerVariable suitable for JSON or YAML export. +func (m *NamedServerVariable) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ServerVariable StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Value != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value)) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. 
+func (m *NamedStringArray) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of OauthFlow suitable for JSON or YAML export. +func (m *OauthFlow) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AuthorizationUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) + } + if m.TokenUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + } + if m.RefreshUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("refreshUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.RefreshUrl)) + } + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of OauthFlows suitable for JSON or YAML export. +func (m *OauthFlows) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Implicit != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("implicit")) + info.Content = append(info.Content, m.Implicit.ToRawInfo()) + } + if m.Password != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("password")) + info.Content = append(info.Content, m.Password.ToRawInfo()) + } + if m.ClientCredentials != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("clientCredentials")) + info.Content = append(info.Content, m.ClientCredentials.ToRawInfo()) + } + if m.AuthorizationCode != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationCode")) + info.Content = append(info.Content, m.AuthorizationCode.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Object suitable for JSON or YAML export. +func (m *Object) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
+func (m *Operation) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Tags) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.OperationId != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + if m.RequestBody != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("requestBody")) + info.Content = append(info.Content, m.RequestBody.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + if m.Callbacks != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("callbacks")) + info.Content = append(info.Content, m.Callbacks.ToRawInfo()) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if len(m.Servers) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Servers { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("servers")) + info.Content = append(info.Content, items) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Style != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("style")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Style)) + } + if m.Explode != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("explode")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Explode)) + } + if m.AllowReserved != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowReserved")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowReserved)) + } + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.Content != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ParameterOrReference suitable for JSON or YAML export. +func (m *ParameterOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ParameterOrReference + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of ParametersOrReferences suitable for JSON or YAML export. 
+func (m *ParametersOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.XRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Get != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) + info.Content = append(info.Content, m.Get.ToRawInfo()) + } + if m.Put != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) + info.Content = append(info.Content, m.Put.ToRawInfo()) + } + if m.Post != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) + info.Content = append(info.Content, m.Post.ToRawInfo()) + } + if m.Delete != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) + info.Content = append(info.Content, m.Delete.ToRawInfo()) + } + if m.Options != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) + info.Content = append(info.Content, m.Options.ToRawInfo()) + } + if m.Head != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) + info.Content = append(info.Content, m.Head.ToRawInfo()) + } + if m.Patch != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) + info.Content = append(info.Content, m.Patch.ToRawInfo()) + } + if m.Trace != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("trace")) + info.Content = append(info.Content, m.Trace.ToRawInfo()) + } + if len(m.Servers) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Servers { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("servers")) + info.Content = append(info.Content, items) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. 
+func (m *Paths) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Path != nil { + for _, item := range m.Path { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Reference suitable for JSON or YAML export. +func (m *Reference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + return info +} + +// ToRawInfo returns a description of RequestBodiesOrReferences suitable for JSON or YAML export. +func (m *RequestBodiesOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of RequestBody suitable for JSON or YAML export. +func (m *RequestBody) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of RequestBodyOrReference suitable for JSON or YAML export. 
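
Reference.ToRawInfo above always emits the $ref key, so a reference round-trips to the familiar YAML form. A small usage sketch (hypothetical; it assumes the vendored github.com/google/gnostic-models/openapiv3 package is importable as openapi_v3 and that gopkg.in/yaml.v3 is available, as it is in this module's dependency set):

```go
package main

import (
	"fmt"

	openapi_v3 "github.com/google/gnostic-models/openapiv3"
	"gopkg.in/yaml.v3"
)

func main() {
	ref := &openapi_v3.Reference{XRef: "#/components/schemas/Pet"}
	out, err := yaml.Marshal(ref.ToRawInfo())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Prints something like:
	// $ref: '#/components/schemas/Pet'
}
```
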
+func (m *RequestBodyOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // RequestBodyOrReference + // {Name:requestBody Type:RequestBody StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetRequestBody() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. +func (m *Response) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.Content != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("content")) + info.Content = append(info.Content, m.Content.ToRawInfo()) + } + if m.Links != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("links")) + info.Content = append(info.Content, m.Links.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponseOrReference suitable for JSON or YAML export. +func (m *ResponseOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ResponseOrReference + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.ResponseOrReference != nil { + for _, item := range m.ResponseOrReference { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponsesOrReferences suitable for JSON or YAML export. 
+func (m *ResponsesOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. +func (m *Schema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Nullable != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("nullable")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Nullable)) + } + if m.Discriminator != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) + info.Content = append(info.Content, m.Discriminator.ToRawInfo()) + } + if m.ReadOnly != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) + } + if m.WriteOnly != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("writeOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.WriteOnly)) + } + if m.Xml != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) + info.Content = append(info.Content, m.Xml.ToRawInfo()) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if m.Title != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, 
compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if m.MaxProperties != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) + } + if m.MinProperties != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) + } + if len(m.Required) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if len(m.AllOf) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.AllOf { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) + info.Content = append(info.Content, items) + } + if len(m.OneOf) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.OneOf { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("oneOf")) + info.Content = append(info.Content, items) + } + if len(m.AnyOf) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.AnyOf { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("anyOf")) + info.Content = append(info.Content, items) + } + if m.Not != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("not")) + info.Content = append(info.Content, m.Not.ToRawInfo()) + } + if m.Items != nil { + items := compiler.NewSequenceNode() + for _, item := range m.Items.SchemaOrReference { + items.Content = append(items.Content, item.ToRawInfo()) + } + if len(items.Content) == 1 { + items = items.Content[0] + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, items) + } + if m.Properties != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) + info.Content = append(info.Content, m.Properties.ToRawInfo()) + } + 
if m.AdditionalProperties != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) + info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SchemaOrReference suitable for JSON or YAML export. +func (m *SchemaOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SchemaOrReference + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of SchemasOrReferences suitable for JSON or YAML export. +func (m *SchemasOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SecurityScheme suitable for JSON or YAML export. +func (m *SecurityScheme) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Scheme != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scheme")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Scheme)) + } + if m.BearerFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("bearerFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BearerFormat)) + } + if m.Flows != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("flows")) + info.Content = append(info.Content, m.Flows.ToRawInfo()) + } + if m.OpenIdConnectUrl != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("openIdConnectUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OpenIdConnectUrl)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SecuritySchemeOrReference suitable for JSON or YAML export. +func (m *SecuritySchemeOrReference) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SecuritySchemeOrReference + // {Name:securityScheme Type:SecurityScheme StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSecurityScheme() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:reference Type:Reference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of SecuritySchemesOrReferences suitable for JSON or YAML export. +func (m *SecuritySchemesOrReferences) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Server suitable for JSON or YAML export. +func (m *Server) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Variables != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("variables")) + info.Content = append(info.Content, m.Variables.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ServerVariable suitable for JSON or YAML export. +func (m *ServerVariable) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Enum) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Enum)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Default)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ServerVariables suitable for JSON or YAML export. +func (m *ServerVariables) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SpecificationExtension suitable for JSON or YAML export. +func (m *SpecificationExtension) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SpecificationExtension + // {Name:number Type:float StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v0, ok := m.GetOneof().(*SpecificationExtension_Number); ok { + return compiler.NewScalarNodeForFloat(v0.Number) + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*SpecificationExtension_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + // {Name:string Type:string StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v2, ok := m.GetOneof().(*SpecificationExtension_String_); ok { + return compiler.NewScalarNodeForString(v2.String_) + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() *yaml.Node { + return compiler.NewSequenceNodeForStringArray(m.Value) +} + +// ToRawInfo returns a description of Strings suitable for JSON or YAML export. 
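
Unlike the mapping-node methods, SpecificationExtension.ToRawInfo above collapses the oneof into a single bare scalar node (number, boolean, or string). A hedged sketch of what that means for an extension value; the field and wrapper names are taken from the generated code above, while the import path is an assumption based on the vendor directory:

```go
package main

import (
	"fmt"

	openapi_v3 "github.com/google/gnostic-models/openapiv3"
)

func main() {
	// A string-valued specification extension (e.g. the value of an "x-..." key).
	ext := &openapi_v3.SpecificationExtension{
		Oneof: &openapi_v3.SpecificationExtension_String_{String_: "internal"},
	}
	// ToRawInfo returns a single scalar node rather than a mapping.
	node := ext.ToRawInfo()
	fmt.Println(node.Value) // internal
}
```
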
+func (m *Strings) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Namespace != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) + } + if m.Prefix != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) + } + if m.Attribute != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) + } + if m.Wrapped != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) + } + if m.SpecificationExtension != nil { + for _, item := range m.SpecificationExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +var ( + pattern0 = regexp.MustCompile("^") + pattern1 = regexp.MustCompile("^x-") + pattern2 = regexp.MustCompile("^/") + pattern3 = regexp.MustCompile("^([0-9X]{3})$") +) diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go new file mode 100644 index 000000000..945b8d11f --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go @@ -0,0 +1,8053 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.19.3 +// source: openapiv3/OpenAPIv3.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AdditionalPropertiesItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *AdditionalPropertiesItem_SchemaOrReference + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *AdditionalPropertiesItem) Reset() { + *x = AdditionalPropertiesItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdditionalPropertiesItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdditionalPropertiesItem) ProtoMessage() {} + +func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. 
+func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{0} +} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *AdditionalPropertiesItem) GetSchemaOrReference() *SchemaOrReference { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_SchemaOrReference); ok { + return x.SchemaOrReference + } + return nil +} + +func (x *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_SchemaOrReference struct { + SchemaOrReference *SchemaOrReference `protobuf:"bytes,1,opt,name=schema_or_reference,json=schemaOrReference,proto3,oneof"` +} + +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +func (*AdditionalPropertiesItem_SchemaOrReference) isAdditionalPropertiesItem_Oneof() {} + +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
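
For the generated oneof messages, the per-variant getters shown above return the zero value whenever a different variant (or nothing at all) is set, so callers can probe a wrapper without writing a type switch. A short sketch using AdditionalPropertiesItem (illustrative; the import path is assumed from the vendor directory):

```go
package main

import (
	"fmt"

	openapi_v3 "github.com/google/gnostic-models/openapiv3"
)

func main() {
	// Populate the boolean variant of the oneof.
	item := &openapi_v3.AdditionalPropertiesItem{
		Oneof: &openapi_v3.AdditionalPropertiesItem_Boolean{Boolean: true},
	}
	fmt.Println(item.GetBoolean())           // true
	fmt.Println(item.GetSchemaOrReference()) // <nil>, since the schema variant is not set
}
```
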
+func (*Any) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{1} +} + +func (x *Any) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +func (x *Any) GetYaml() string { + if x != nil { + return x.Yaml + } + return "" +} + +type AnyOrExpression struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *AnyOrExpression_Any + // *AnyOrExpression_Expression + Oneof isAnyOrExpression_Oneof `protobuf_oneof:"oneof"` +} + +func (x *AnyOrExpression) Reset() { + *x = AnyOrExpression{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyOrExpression) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyOrExpression) ProtoMessage() {} + +func (x *AnyOrExpression) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyOrExpression.ProtoReflect.Descriptor instead. +func (*AnyOrExpression) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{2} +} + +func (m *AnyOrExpression) GetOneof() isAnyOrExpression_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *AnyOrExpression) GetAny() *Any { + if x, ok := x.GetOneof().(*AnyOrExpression_Any); ok { + return x.Any + } + return nil +} + +func (x *AnyOrExpression) GetExpression() *Expression { + if x, ok := x.GetOneof().(*AnyOrExpression_Expression); ok { + return x.Expression + } + return nil +} + +type isAnyOrExpression_Oneof interface { + isAnyOrExpression_Oneof() +} + +type AnyOrExpression_Any struct { + Any *Any `protobuf:"bytes,1,opt,name=any,proto3,oneof"` +} + +type AnyOrExpression_Expression struct { + Expression *Expression `protobuf:"bytes,2,opt,name=expression,proto3,oneof"` +} + +func (*AnyOrExpression_Any) isAnyOrExpression_Oneof() {} + +func (*AnyOrExpression_Expression) isAnyOrExpression_Oneof() {} + +// A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation. 
+type Callback struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path []*NamedPathItem `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,2,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Callback) Reset() { + *x = Callback{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Callback) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Callback) ProtoMessage() {} + +func (x *Callback) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Callback.ProtoReflect.Descriptor instead. +func (*Callback) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{3} +} + +func (x *Callback) GetPath() []*NamedPathItem { + if x != nil { + return x.Path + } + return nil +} + +func (x *Callback) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type CallbackOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *CallbackOrReference_Callback + // *CallbackOrReference_Reference + Oneof isCallbackOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *CallbackOrReference) Reset() { + *x = CallbackOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallbackOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallbackOrReference) ProtoMessage() {} + +func (x *CallbackOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallbackOrReference.ProtoReflect.Descriptor instead. 
+func (*CallbackOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{4} +} + +func (m *CallbackOrReference) GetOneof() isCallbackOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *CallbackOrReference) GetCallback() *Callback { + if x, ok := x.GetOneof().(*CallbackOrReference_Callback); ok { + return x.Callback + } + return nil +} + +func (x *CallbackOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*CallbackOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isCallbackOrReference_Oneof interface { + isCallbackOrReference_Oneof() +} + +type CallbackOrReference_Callback struct { + Callback *Callback `protobuf:"bytes,1,opt,name=callback,proto3,oneof"` +} + +type CallbackOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*CallbackOrReference_Callback) isCallbackOrReference_Oneof() {} + +func (*CallbackOrReference_Reference) isCallbackOrReference_Oneof() {} + +type CallbacksOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedCallbackOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *CallbacksOrReferences) Reset() { + *x = CallbacksOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallbacksOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallbacksOrReferences) ProtoMessage() {} + +func (x *CallbacksOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallbacksOrReferences.ProtoReflect.Descriptor instead. +func (*CallbacksOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{5} +} + +func (x *CallbacksOrReferences) GetAdditionalProperties() []*NamedCallbackOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object. 
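Editorial aside (not part of the vendored file, placed here before the Components message): the *OrReference messages in this file all follow protoc-gen-go's oneof wrapper pattern. The active case lives in a small wrapper struct such as CallbackOrReference_Callback or CallbackOrReference_Reference, and callers read it back through the typed getters or a type switch on GetOneof(). A minimal sketch, assuming it sits in the same package as the generated code (the helper name is made up):

func callbackKind(cb *CallbackOrReference) string {
	// GetOneof is nil-safe, so an unset or nil message falls through to default.
	switch cb.GetOneof().(type) {
	case *CallbackOrReference_Callback:
		return "inline callback"
	case *CallbackOrReference_Reference:
		return "$ref to a components entry"
	default:
		return "unset"
	}
}

Setting a case works the same way in reverse, e.g. &CallbackOrReference{Oneof: &CallbackOrReference_Callback{Callback: &Callback{}}}.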
+type Components struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Schemas *SchemasOrReferences `protobuf:"bytes,1,opt,name=schemas,proto3" json:"schemas,omitempty"` + Responses *ResponsesOrReferences `protobuf:"bytes,2,opt,name=responses,proto3" json:"responses,omitempty"` + Parameters *ParametersOrReferences `protobuf:"bytes,3,opt,name=parameters,proto3" json:"parameters,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + RequestBodies *RequestBodiesOrReferences `protobuf:"bytes,5,opt,name=request_bodies,json=requestBodies,proto3" json:"request_bodies,omitempty"` + Headers *HeadersOrReferences `protobuf:"bytes,6,opt,name=headers,proto3" json:"headers,omitempty"` + SecuritySchemes *SecuritySchemesOrReferences `protobuf:"bytes,7,opt,name=security_schemes,json=securitySchemes,proto3" json:"security_schemes,omitempty"` + Links *LinksOrReferences `protobuf:"bytes,8,opt,name=links,proto3" json:"links,omitempty"` + Callbacks *CallbacksOrReferences `protobuf:"bytes,9,opt,name=callbacks,proto3" json:"callbacks,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,10,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Components) Reset() { + *x = Components{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Components) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Components) ProtoMessage() {} + +func (x *Components) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Components.ProtoReflect.Descriptor instead. +func (*Components) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{6} +} + +func (x *Components) GetSchemas() *SchemasOrReferences { + if x != nil { + return x.Schemas + } + return nil +} + +func (x *Components) GetResponses() *ResponsesOrReferences { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Components) GetParameters() *ParametersOrReferences { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Components) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Components) GetRequestBodies() *RequestBodiesOrReferences { + if x != nil { + return x.RequestBodies + } + return nil +} + +func (x *Components) GetHeaders() *HeadersOrReferences { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Components) GetSecuritySchemes() *SecuritySchemesOrReferences { + if x != nil { + return x.SecuritySchemes + } + return nil +} + +func (x *Components) GetLinks() *LinksOrReferences { + if x != nil { + return x.Links + } + return nil +} + +func (x *Components) GetCallbacks() *CallbacksOrReferences { + if x != nil { + return x.Callbacks + } + return nil +} + +func (x *Components) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Contact information for the exposed API. 
+type Contact struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Contact) Reset() { + *x = Contact{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Contact.ProtoReflect.Descriptor instead. +func (*Contact) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{7} +} + +func (x *Contact) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *Contact) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type DefaultType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *DefaultType_Number + // *DefaultType_Boolean + // *DefaultType_String_ + Oneof isDefaultType_Oneof `protobuf_oneof:"oneof"` +} + +func (x *DefaultType) Reset() { + *x = DefaultType{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DefaultType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DefaultType) ProtoMessage() {} + +func (x *DefaultType) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DefaultType.ProtoReflect.Descriptor instead. 
+func (*DefaultType) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{8} +} + +func (m *DefaultType) GetOneof() isDefaultType_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *DefaultType) GetNumber() float64 { + if x, ok := x.GetOneof().(*DefaultType_Number); ok { + return x.Number + } + return 0 +} + +func (x *DefaultType) GetBoolean() bool { + if x, ok := x.GetOneof().(*DefaultType_Boolean); ok { + return x.Boolean + } + return false +} + +func (x *DefaultType) GetString_() string { + if x, ok := x.GetOneof().(*DefaultType_String_); ok { + return x.String_ + } + return "" +} + +type isDefaultType_Oneof interface { + isDefaultType_Oneof() +} + +type DefaultType_Number struct { + Number float64 `protobuf:"fixed64,1,opt,name=number,proto3,oneof"` +} + +type DefaultType_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +type DefaultType_String_ struct { + String_ string `protobuf:"bytes,3,opt,name=string,proto3,oneof"` +} + +func (*DefaultType_Number) isDefaultType_Oneof() {} + +func (*DefaultType_Boolean) isDefaultType_Oneof() {} + +func (*DefaultType_String_) isDefaultType_Oneof() {} + +// When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered. +type Discriminator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PropertyName string `protobuf:"bytes,1,opt,name=property_name,json=propertyName,proto3" json:"property_name,omitempty"` + Mapping *Strings `protobuf:"bytes,2,opt,name=mapping,proto3" json:"mapping,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Discriminator) Reset() { + *x = Discriminator{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Discriminator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Discriminator) ProtoMessage() {} + +func (x *Discriminator) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Discriminator.ProtoReflect.Descriptor instead. 
+func (*Discriminator) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{9} +} + +func (x *Discriminator) GetPropertyName() string { + if x != nil { + return x.PropertyName + } + return "" +} + +func (x *Discriminator) GetMapping() *Strings { + if x != nil { + return x.Mapping + } + return nil +} + +func (x *Discriminator) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Openapi string `protobuf:"bytes,1,opt,name=openapi,proto3" json:"openapi,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + Servers []*Server `protobuf:"bytes,3,rep,name=servers,proto3" json:"servers,omitempty"` + Paths *Paths `protobuf:"bytes,4,opt,name=paths,proto3" json:"paths,omitempty"` + Components *Components `protobuf:"bytes,5,opt,name=components,proto3" json:"components,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,6,rep,name=security,proto3" json:"security,omitempty"` + Tags []*Tag `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,9,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Document.ProtoReflect.Descriptor instead. +func (*Document) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{10} +} + +func (x *Document) GetOpenapi() string { + if x != nil { + return x.Openapi + } + return "" +} + +func (x *Document) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *Document) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +func (x *Document) GetPaths() *Paths { + if x != nil { + return x.Paths + } + return nil +} + +func (x *Document) GetComponents() *Components { + if x != nil { + return x.Components + } + return nil +} + +func (x *Document) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Document) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Document) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Document) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// A single encoding definition applied to a single schema property. 
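Editorial aside (not part of the vendored file, placed here before the Encoding message): the Document message above mirrors the top level of an OpenAPI v3 description, so building one programmatically only needs the openapi version string plus an Info block; the remaining fields are optional. A hedged sketch in the same package; the function name and the literal title/version are illustrative, not taken from the repository:

func minimalDocument() *Document {
	return &Document{
		Openapi: "3.0.3",
		Info: &Info{
			Title:   "Example API", // hypothetical values for illustration
			Version: "1.0.0",
		},
	}
}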
+type Encoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + Headers *HeadersOrReferences `protobuf:"bytes,2,opt,name=headers,proto3" json:"headers,omitempty"` + Style string `protobuf:"bytes,3,opt,name=style,proto3" json:"style,omitempty"` + Explode bool `protobuf:"varint,4,opt,name=explode,proto3" json:"explode,omitempty"` + AllowReserved bool `protobuf:"varint,5,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,6,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Encoding) Reset() { + *x = Encoding{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Encoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Encoding) ProtoMessage() {} + +func (x *Encoding) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Encoding.ProtoReflect.Descriptor instead. +func (*Encoding) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{11} +} + +func (x *Encoding) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *Encoding) GetHeaders() *HeadersOrReferences { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Encoding) GetStyle() string { + if x != nil { + return x.Style + } + return "" +} + +func (x *Encoding) GetExplode() bool { + if x != nil { + return x.Explode + } + return false +} + +func (x *Encoding) GetAllowReserved() bool { + if x != nil { + return x.AllowReserved + } + return false +} + +func (x *Encoding) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Encodings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedEncoding `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Encodings) Reset() { + *x = Encodings{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Encodings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Encodings) ProtoMessage() {} + +func (x *Encodings) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Encodings.ProtoReflect.Descriptor instead. 
+func (*Encodings) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{12} +} + +func (x *Encodings) GetAdditionalProperties() []*NamedEncoding { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Example struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value *Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + ExternalValue string `protobuf:"bytes,4,opt,name=external_value,json=externalValue,proto3" json:"external_value,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Example) Reset() { + *x = Example{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Example) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Example) ProtoMessage() {} + +func (x *Example) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Example.ProtoReflect.Descriptor instead. +func (*Example) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{13} +} + +func (x *Example) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Example) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Example) GetValue() *Any { + if x != nil { + return x.Value + } + return nil +} + +func (x *Example) GetExternalValue() string { + if x != nil { + return x.ExternalValue + } + return "" +} + +func (x *Example) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ExampleOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ExampleOrReference_Example + // *ExampleOrReference_Reference + Oneof isExampleOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ExampleOrReference) Reset() { + *x = ExampleOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExampleOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExampleOrReference) ProtoMessage() {} + +func (x *ExampleOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExampleOrReference.ProtoReflect.Descriptor instead. 
+func (*ExampleOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{14} +} + +func (m *ExampleOrReference) GetOneof() isExampleOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ExampleOrReference) GetExample() *Example { + if x, ok := x.GetOneof().(*ExampleOrReference_Example); ok { + return x.Example + } + return nil +} + +func (x *ExampleOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*ExampleOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isExampleOrReference_Oneof interface { + isExampleOrReference_Oneof() +} + +type ExampleOrReference_Example struct { + Example *Example `protobuf:"bytes,1,opt,name=example,proto3,oneof"` +} + +type ExampleOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*ExampleOrReference_Example) isExampleOrReference_Oneof() {} + +func (*ExampleOrReference_Reference) isExampleOrReference_Oneof() {} + +type ExamplesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedExampleOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ExamplesOrReferences) Reset() { + *x = ExamplesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExamplesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExamplesOrReferences) ProtoMessage() {} + +func (x *ExamplesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExamplesOrReferences.ProtoReflect.Descriptor instead. +func (*ExamplesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{15} +} + +func (x *ExamplesOrReferences) GetAdditionalProperties() []*NamedExampleOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Expression struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Expression) Reset() { + *x = Expression{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expression) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expression) ProtoMessage() {} + +func (x *Expression) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expression.ProtoReflect.Descriptor instead. 
+func (*Expression) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{16} +} + +func (x *Expression) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Allows referencing an external resource for extended documentation. +type ExternalDocs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *ExternalDocs) Reset() { + *x = ExternalDocs{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalDocs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocs) ProtoMessage() {} + +func (x *ExternalDocs) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. +func (*ExternalDocs) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{17} +} + +func (x *ExternalDocs) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ExternalDocs) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ExternalDocs) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`). 
+type Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,2,opt,name=required,proto3" json:"required,omitempty"` + Deprecated bool `protobuf:"varint,3,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + AllowEmptyValue bool `protobuf:"varint,4,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Style string `protobuf:"bytes,5,opt,name=style,proto3" json:"style,omitempty"` + Explode bool `protobuf:"varint,6,opt,name=explode,proto3" json:"explode,omitempty"` + AllowReserved bool `protobuf:"varint,7,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` + Schema *SchemaOrReference `protobuf:"bytes,8,opt,name=schema,proto3" json:"schema,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,10,opt,name=examples,proto3" json:"examples,omitempty"` + Content *MediaTypes `protobuf:"bytes,11,opt,name=content,proto3" json:"content,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,12,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
+func (*Header) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{18} +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Header) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Header) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Header) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *Header) GetStyle() string { + if x != nil { + return x.Style + } + return "" +} + +func (x *Header) GetExplode() bool { + if x != nil { + return x.Explode + } + return false +} + +func (x *Header) GetAllowReserved() bool { + if x != nil { + return x.AllowReserved + } + return false +} + +func (x *Header) GetSchema() *SchemaOrReference { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Header) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Header) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Header) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *Header) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type HeaderOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *HeaderOrReference_Header + // *HeaderOrReference_Reference + Oneof isHeaderOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *HeaderOrReference) Reset() { + *x = HeaderOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderOrReference) ProtoMessage() {} + +func (x *HeaderOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderOrReference.ProtoReflect.Descriptor instead. 
+func (*HeaderOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{19} +} + +func (m *HeaderOrReference) GetOneof() isHeaderOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *HeaderOrReference) GetHeader() *Header { + if x, ok := x.GetOneof().(*HeaderOrReference_Header); ok { + return x.Header + } + return nil +} + +func (x *HeaderOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*HeaderOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isHeaderOrReference_Oneof interface { + isHeaderOrReference_Oneof() +} + +type HeaderOrReference_Header struct { + Header *Header `protobuf:"bytes,1,opt,name=header,proto3,oneof"` +} + +type HeaderOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*HeaderOrReference_Header) isHeaderOrReference_Oneof() {} + +func (*HeaderOrReference_Reference) isHeaderOrReference_Oneof() {} + +type HeadersOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedHeaderOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *HeadersOrReferences) Reset() { + *x = HeadersOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadersOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadersOrReferences) ProtoMessage() {} + +func (x *HeadersOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadersOrReferences.ProtoReflect.Descriptor instead. +func (*HeadersOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{20} +} + +func (x *HeadersOrReferences) GetAdditionalProperties() []*NamedHeaderOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// The object provides metadata about the API. The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience. 
+type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"` + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,7,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` + Summary string `protobuf:"bytes,8,opt,name=summary,proto3" json:"summary,omitempty"` +} + +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. +func (*Info) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{21} +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.License + } + return nil +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Info) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +func (x *Info) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +type ItemsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SchemaOrReference []*SchemaOrReference `protobuf:"bytes,1,rep,name=schema_or_reference,json=schemaOrReference,proto3" json:"schema_or_reference,omitempty"` +} + +func (x *ItemsItem) Reset() { + *x = ItemsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ItemsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ItemsItem) ProtoMessage() {} + +func (x *ItemsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) 
+} + +// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. +func (*ItemsItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{22} +} + +func (x *ItemsItem) GetSchemaOrReference() []*SchemaOrReference { + if x != nil { + return x.SchemaOrReference + } + return nil +} + +// License information for the exposed API. +type License struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *License) Reset() { + *x = License{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use License.ProtoReflect.Descriptor instead. +func (*License) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{23} +} + +func (x *License) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *License) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation. 
+type Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OperationRef string `protobuf:"bytes,1,opt,name=operation_ref,json=operationRef,proto3" json:"operation_ref,omitempty"` + OperationId string `protobuf:"bytes,2,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + Parameters *AnyOrExpression `protobuf:"bytes,3,opt,name=parameters,proto3" json:"parameters,omitempty"` + RequestBody *AnyOrExpression `protobuf:"bytes,4,opt,name=request_body,json=requestBody,proto3" json:"request_body,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + Server *Server `protobuf:"bytes,6,opt,name=server,proto3" json:"server,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,7,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Link) Reset() { + *x = Link{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Link) ProtoMessage() {} + +func (x *Link) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Link.ProtoReflect.Descriptor instead. +func (*Link) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{24} +} + +func (x *Link) GetOperationRef() string { + if x != nil { + return x.OperationRef + } + return "" +} + +func (x *Link) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Link) GetParameters() *AnyOrExpression { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Link) GetRequestBody() *AnyOrExpression { + if x != nil { + return x.RequestBody + } + return nil +} + +func (x *Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Link) GetServer() *Server { + if x != nil { + return x.Server + } + return nil +} + +func (x *Link) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type LinkOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *LinkOrReference_Link + // *LinkOrReference_Reference + Oneof isLinkOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *LinkOrReference) Reset() { + *x = LinkOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinkOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinkOrReference) ProtoMessage() {} + +func (x *LinkOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
LinkOrReference.ProtoReflect.Descriptor instead. +func (*LinkOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{25} +} + +func (m *LinkOrReference) GetOneof() isLinkOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *LinkOrReference) GetLink() *Link { + if x, ok := x.GetOneof().(*LinkOrReference_Link); ok { + return x.Link + } + return nil +} + +func (x *LinkOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*LinkOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isLinkOrReference_Oneof interface { + isLinkOrReference_Oneof() +} + +type LinkOrReference_Link struct { + Link *Link `protobuf:"bytes,1,opt,name=link,proto3,oneof"` +} + +type LinkOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*LinkOrReference_Link) isLinkOrReference_Oneof() {} + +func (*LinkOrReference_Reference) isLinkOrReference_Oneof() {} + +type LinksOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedLinkOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *LinksOrReferences) Reset() { + *x = LinksOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LinksOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LinksOrReferences) ProtoMessage() {} + +func (x *LinksOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LinksOrReferences.ProtoReflect.Descriptor instead. +func (*LinksOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{26} +} + +func (x *LinksOrReferences) GetAdditionalProperties() []*NamedLinkOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Each Media Type Object provides schema and examples for the media type identified by its key. 
+type MediaType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Schema *SchemaOrReference `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` + Example *Any `protobuf:"bytes,2,opt,name=example,proto3" json:"example,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,3,opt,name=examples,proto3" json:"examples,omitempty"` + Encoding *Encodings `protobuf:"bytes,4,opt,name=encoding,proto3" json:"encoding,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *MediaType) Reset() { + *x = MediaType{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MediaType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MediaType) ProtoMessage() {} + +func (x *MediaType) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MediaType.ProtoReflect.Descriptor instead. +func (*MediaType) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{27} +} + +func (x *MediaType) GetSchema() *SchemaOrReference { + if x != nil { + return x.Schema + } + return nil +} + +func (x *MediaType) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *MediaType) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *MediaType) GetEncoding() *Encodings { + if x != nil { + return x.Encoding + } + return nil +} + +func (x *MediaType) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type MediaTypes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedMediaType `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *MediaTypes) Reset() { + *x = MediaTypes{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MediaTypes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MediaTypes) ProtoMessage() {} + +func (x *MediaTypes) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MediaTypes.ProtoReflect.Descriptor instead. +func (*MediaTypes) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{28} +} + +func (x *MediaTypes) GetAdditionalProperties() []*NamedMediaType { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. 
+type NamedAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedAny) Reset() { + *x = NamedAny{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedAny) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedAny) ProtoMessage() {} + +func (x *NamedAny) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. +func (*NamedAny) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{29} +} + +func (x *NamedAny) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedAny) GetValue() *Any { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of CallbackOrReference as ordered (name,value) pairs. +type NamedCallbackOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *CallbackOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedCallbackOrReference) Reset() { + *x = NamedCallbackOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedCallbackOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedCallbackOrReference) ProtoMessage() {} + +func (x *NamedCallbackOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedCallbackOrReference.ProtoReflect.Descriptor instead. +func (*NamedCallbackOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{30} +} + +func (x *NamedCallbackOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedCallbackOrReference) GetValue() *CallbackOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Encoding as ordered (name,value) pairs. 
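Editorial aside (not part of the vendored file, placed before the next Named* message): as the generated comments say, the Named* messages represent what JSON/YAML would express as maps, kept as ordered (name, value) pairs to preserve key order. When order does not matter, it is common to index them into a plain Go map; a sketch in the same package, with a made-up helper name:

func specificationExtensions(pairs []*NamedAny) map[string]*Any {
	// The generated getters are nil-safe, so a nil slice simply yields an empty map.
	out := make(map[string]*Any, len(pairs))
	for _, p := range pairs {
		out[p.GetName()] = p.GetValue()
	}
	return out
}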
+type NamedEncoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Encoding `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedEncoding) Reset() { + *x = NamedEncoding{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedEncoding) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedEncoding) ProtoMessage() {} + +func (x *NamedEncoding) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedEncoding.ProtoReflect.Descriptor instead. +func (*NamedEncoding) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{31} +} + +func (x *NamedEncoding) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedEncoding) GetValue() *Encoding { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ExampleOrReference as ordered (name,value) pairs. +type NamedExampleOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ExampleOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedExampleOrReference) Reset() { + *x = NamedExampleOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedExampleOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedExampleOrReference) ProtoMessage() {} + +func (x *NamedExampleOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedExampleOrReference.ProtoReflect.Descriptor instead. +func (*NamedExampleOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{32} +} + +func (x *NamedExampleOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedExampleOrReference) GetValue() *ExampleOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of HeaderOrReference as ordered (name,value) pairs. 
+type NamedHeaderOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *HeaderOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedHeaderOrReference) Reset() { + *x = NamedHeaderOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedHeaderOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedHeaderOrReference) ProtoMessage() {} + +func (x *NamedHeaderOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedHeaderOrReference.ProtoReflect.Descriptor instead. +func (*NamedHeaderOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{33} +} + +func (x *NamedHeaderOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedHeaderOrReference) GetValue() *HeaderOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of LinkOrReference as ordered (name,value) pairs. +type NamedLinkOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *LinkOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedLinkOrReference) Reset() { + *x = NamedLinkOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedLinkOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedLinkOrReference) ProtoMessage() {} + +func (x *NamedLinkOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedLinkOrReference.ProtoReflect.Descriptor instead. +func (*NamedLinkOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{34} +} + +func (x *NamedLinkOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedLinkOrReference) GetValue() *LinkOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of MediaType as ordered (name,value) pairs. 
+type NamedMediaType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *MediaType `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedMediaType) Reset() { + *x = NamedMediaType{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedMediaType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedMediaType) ProtoMessage() {} + +func (x *NamedMediaType) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedMediaType.ProtoReflect.Descriptor instead. +func (*NamedMediaType) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{35} +} + +func (x *NamedMediaType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedMediaType) GetValue() *MediaType { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ParameterOrReference as ordered (name,value) pairs. +type NamedParameterOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ParameterOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedParameterOrReference) Reset() { + *x = NamedParameterOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedParameterOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedParameterOrReference) ProtoMessage() {} + +func (x *NamedParameterOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedParameterOrReference.ProtoReflect.Descriptor instead. +func (*NamedParameterOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{36} +} + +func (x *NamedParameterOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedParameterOrReference) GetValue() *ParameterOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedPathItem) Reset() { + *x = NamedPathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedPathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedPathItem) ProtoMessage() {} + +func (x *NamedPathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. +func (*NamedPathItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{37} +} + +func (x *NamedPathItem) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedPathItem) GetValue() *PathItem { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of RequestBodyOrReference as ordered (name,value) pairs. +type NamedRequestBodyOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *RequestBodyOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedRequestBodyOrReference) Reset() { + *x = NamedRequestBodyOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedRequestBodyOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedRequestBodyOrReference) ProtoMessage() {} + +func (x *NamedRequestBodyOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedRequestBodyOrReference.ProtoReflect.Descriptor instead. +func (*NamedRequestBodyOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{38} +} + +func (x *NamedRequestBodyOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedRequestBodyOrReference) GetValue() *RequestBodyOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseOrReference as ordered (name,value) pairs. 
+type NamedResponseOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ResponseOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedResponseOrReference) Reset() { + *x = NamedResponseOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedResponseOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedResponseOrReference) ProtoMessage() {} + +func (x *NamedResponseOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedResponseOrReference.ProtoReflect.Descriptor instead. +func (*NamedResponseOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{39} +} + +func (x *NamedResponseOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedResponseOrReference) GetValue() *ResponseOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SchemaOrReference as ordered (name,value) pairs. +type NamedSchemaOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *SchemaOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSchemaOrReference) Reset() { + *x = NamedSchemaOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSchemaOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSchemaOrReference) ProtoMessage() {} + +func (x *NamedSchemaOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSchemaOrReference.ProtoReflect.Descriptor instead. +func (*NamedSchemaOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{40} +} + +func (x *NamedSchemaOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSchemaOrReference) GetValue() *SchemaOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecuritySchemeOrReference as ordered (name,value) pairs. 
+type NamedSecuritySchemeOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *SecuritySchemeOrReference `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSecuritySchemeOrReference) Reset() { + *x = NamedSecuritySchemeOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSecuritySchemeOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSecuritySchemeOrReference) ProtoMessage() {} + +func (x *NamedSecuritySchemeOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSecuritySchemeOrReference.ProtoReflect.Descriptor instead. +func (*NamedSecuritySchemeOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{41} +} + +func (x *NamedSecuritySchemeOrReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSecuritySchemeOrReference) GetValue() *SecuritySchemeOrReference { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ServerVariable as ordered (name,value) pairs. +type NamedServerVariable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ServerVariable `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedServerVariable) Reset() { + *x = NamedServerVariable{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedServerVariable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedServerVariable) ProtoMessage() {} + +func (x *NamedServerVariable) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedServerVariable.ProtoReflect.Descriptor instead. +func (*NamedServerVariable) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{42} +} + +func (x *NamedServerVariable) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedServerVariable) GetValue() *ServerVariable { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. 
+type NamedString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedString) Reset() { + *x = NamedString{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedString) ProtoMessage() {} + +func (x *NamedString) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. +func (*NamedString) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{43} +} + +func (x *NamedString) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedString) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedStringArray) Reset() { + *x = NamedStringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedStringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedStringArray) ProtoMessage() {} + +func (x *NamedStringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. 
+func (*NamedStringArray) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{44} +} + +func (x *NamedStringArray) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedStringArray) GetValue() *StringArray { + if x != nil { + return x.Value + } + return nil +} + +// Configuration details for a supported OAuth Flow +type OauthFlow struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AuthorizationUrl string `protobuf:"bytes,1,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,2,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + RefreshUrl string `protobuf:"bytes,3,opt,name=refresh_url,json=refreshUrl,proto3" json:"refresh_url,omitempty"` + Scopes *Strings `protobuf:"bytes,4,opt,name=scopes,proto3" json:"scopes,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *OauthFlow) Reset() { + *x = OauthFlow{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OauthFlow) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OauthFlow) ProtoMessage() {} + +func (x *OauthFlow) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OauthFlow.ProtoReflect.Descriptor instead. +func (*OauthFlow) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{45} +} + +func (x *OauthFlow) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *OauthFlow) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *OauthFlow) GetRefreshUrl() string { + if x != nil { + return x.RefreshUrl + } + return "" +} + +func (x *OauthFlow) GetScopes() *Strings { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *OauthFlow) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Allows configuration of the supported OAuth Flows. 
+type OauthFlows struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Implicit *OauthFlow `protobuf:"bytes,1,opt,name=implicit,proto3" json:"implicit,omitempty"` + Password *OauthFlow `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + ClientCredentials *OauthFlow `protobuf:"bytes,3,opt,name=client_credentials,json=clientCredentials,proto3" json:"client_credentials,omitempty"` + AuthorizationCode *OauthFlow `protobuf:"bytes,4,opt,name=authorization_code,json=authorizationCode,proto3" json:"authorization_code,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *OauthFlows) Reset() { + *x = OauthFlows{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OauthFlows) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OauthFlows) ProtoMessage() {} + +func (x *OauthFlows) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OauthFlows.ProtoReflect.Descriptor instead. +func (*OauthFlows) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{46} +} + +func (x *OauthFlows) GetImplicit() *OauthFlow { + if x != nil { + return x.Implicit + } + return nil +} + +func (x *OauthFlows) GetPassword() *OauthFlow { + if x != nil { + return x.Password + } + return nil +} + +func (x *OauthFlows) GetClientCredentials() *OauthFlow { + if x != nil { + return x.ClientCredentials + } + return nil +} + +func (x *OauthFlows) GetAuthorizationCode() *OauthFlow { + if x != nil { + return x.AuthorizationCode + } + return nil +} + +func (x *OauthFlows) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Object) Reset() { + *x = Object{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Object) ProtoMessage() {} + +func (x *Object) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Object.ProtoReflect.Descriptor instead. +func (*Object) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{47} +} + +func (x *Object) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Describes a single API operation on a path. 
+type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + Parameters []*ParameterOrReference `protobuf:"bytes,6,rep,name=parameters,proto3" json:"parameters,omitempty"` + RequestBody *RequestBodyOrReference `protobuf:"bytes,7,opt,name=request_body,json=requestBody,proto3" json:"request_body,omitempty"` + Responses *Responses `protobuf:"bytes,8,opt,name=responses,proto3" json:"responses,omitempty"` + Callbacks *CallbacksOrReferences `protobuf:"bytes,9,opt,name=callbacks,proto3" json:"callbacks,omitempty"` + Deprecated bool `protobuf:"varint,10,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,11,rep,name=security,proto3" json:"security,omitempty"` + Servers []*Server `protobuf:"bytes,12,rep,name=servers,proto3" json:"servers,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,13,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Operation) Reset() { + *x = Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Operation.ProtoReflect.Descriptor instead. 
+func (*Operation) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{48} +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Operation) GetParameters() []*ParameterOrReference { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Operation) GetRequestBody() *RequestBodyOrReference { + if x != nil { + return x.RequestBody + } + return nil +} + +func (x *Operation) GetResponses() *Responses { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Operation) GetCallbacks() *CallbacksOrReferences { + if x != nil { + return x.Callbacks + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Operation) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +func (x *Operation) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Describes a single operation parameter. A unique parameter is defined by a combination of a name and location. +type Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Deprecated bool `protobuf:"varint,5,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + AllowEmptyValue bool `protobuf:"varint,6,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Style string `protobuf:"bytes,7,opt,name=style,proto3" json:"style,omitempty"` + Explode bool `protobuf:"varint,8,opt,name=explode,proto3" json:"explode,omitempty"` + AllowReserved bool `protobuf:"varint,9,opt,name=allow_reserved,json=allowReserved,proto3" json:"allow_reserved,omitempty"` + Schema *SchemaOrReference `protobuf:"bytes,10,opt,name=schema,proto3" json:"schema,omitempty"` + Example *Any `protobuf:"bytes,11,opt,name=example,proto3" json:"example,omitempty"` + Examples *ExamplesOrReferences `protobuf:"bytes,12,opt,name=examples,proto3" json:"examples,omitempty"` + Content *MediaTypes `protobuf:"bytes,13,opt,name=content,proto3" json:"content,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,14,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Parameter) Reset() { + *x = Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + 
+func (*Parameter) ProtoMessage() {} + +func (x *Parameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. +func (*Parameter) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{49} +} + +func (x *Parameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Parameter) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *Parameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Parameter) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *Parameter) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Parameter) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *Parameter) GetStyle() string { + if x != nil { + return x.Style + } + return "" +} + +func (x *Parameter) GetExplode() bool { + if x != nil { + return x.Explode + } + return false +} + +func (x *Parameter) GetAllowReserved() bool { + if x != nil { + return x.AllowReserved + } + return false +} + +func (x *Parameter) GetSchema() *SchemaOrReference { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Parameter) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Parameter) GetExamples() *ExamplesOrReferences { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Parameter) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *Parameter) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ParameterOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ParameterOrReference_Parameter + // *ParameterOrReference_Reference + Oneof isParameterOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ParameterOrReference) Reset() { + *x = ParameterOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParameterOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParameterOrReference) ProtoMessage() {} + +func (x *ParameterOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParameterOrReference.ProtoReflect.Descriptor instead. 
+func (*ParameterOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{50} +} + +func (m *ParameterOrReference) GetOneof() isParameterOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ParameterOrReference) GetParameter() *Parameter { + if x, ok := x.GetOneof().(*ParameterOrReference_Parameter); ok { + return x.Parameter + } + return nil +} + +func (x *ParameterOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*ParameterOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isParameterOrReference_Oneof interface { + isParameterOrReference_Oneof() +} + +type ParameterOrReference_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +} + +type ParameterOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*ParameterOrReference_Parameter) isParameterOrReference_Oneof() {} + +func (*ParameterOrReference_Reference) isParameterOrReference_Oneof() {} + +type ParametersOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedParameterOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ParametersOrReferences) Reset() { + *x = ParametersOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParametersOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParametersOrReferences) ProtoMessage() {} + +func (x *ParametersOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParametersOrReferences.ProtoReflect.Descriptor instead. +func (*ParametersOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{51} +} + +func (x *ParametersOrReferences) GetAdditionalProperties() []*NamedParameterOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available. 
+type PathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Get *Operation `protobuf:"bytes,4,opt,name=get,proto3" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,5,opt,name=put,proto3" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,6,opt,name=post,proto3" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,7,opt,name=delete,proto3" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,8,opt,name=options,proto3" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,9,opt,name=head,proto3" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,10,opt,name=patch,proto3" json:"patch,omitempty"` + Trace *Operation `protobuf:"bytes,11,opt,name=trace,proto3" json:"trace,omitempty"` + Servers []*Server `protobuf:"bytes,12,rep,name=servers,proto3" json:"servers,omitempty"` + Parameters []*ParameterOrReference `protobuf:"bytes,13,rep,name=parameters,proto3" json:"parameters,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,14,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *PathItem) Reset() { + *x = PathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathItem) ProtoMessage() {} + +func (x *PathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. 
+func (*PathItem) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{52} +} + +func (x *PathItem) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *PathItem) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *PathItem) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *PathItem) GetGet() *Operation { + if x != nil { + return x.Get + } + return nil +} + +func (x *PathItem) GetPut() *Operation { + if x != nil { + return x.Put + } + return nil +} + +func (x *PathItem) GetPost() *Operation { + if x != nil { + return x.Post + } + return nil +} + +func (x *PathItem) GetDelete() *Operation { + if x != nil { + return x.Delete + } + return nil +} + +func (x *PathItem) GetOptions() *Operation { + if x != nil { + return x.Options + } + return nil +} + +func (x *PathItem) GetHead() *Operation { + if x != nil { + return x.Head + } + return nil +} + +func (x *PathItem) GetPatch() *Operation { + if x != nil { + return x.Patch + } + return nil +} + +func (x *PathItem) GetTrace() *Operation { + if x != nil { + return x.Trace + } + return nil +} + +func (x *PathItem) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +func (x *PathItem) GetParameters() []*ParameterOrReference { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *PathItem) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints. +type Paths struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path []*NamedPathItem `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,2,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Paths) Reset() { + *x = Paths{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Paths) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Paths) ProtoMessage() {} + +func (x *Paths) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Paths.ProtoReflect.Descriptor instead. 
+func (*Paths) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{53} +} + +func (x *Paths) GetPath() []*NamedPathItem { + if x != nil { + return x.Path + } + return nil +} + +func (x *Paths) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type Properties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchemaOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Properties) Reset() { + *x = Properties{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Properties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Properties) ProtoMessage() {} + +func (x *Properties) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Properties.ProtoReflect.Descriptor instead. +func (*Properties) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{54} +} + +func (x *Properties) GetAdditionalProperties() []*NamedSchemaOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification. +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. 
+func (*Reference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{55} +} + +func (x *Reference) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *Reference) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Reference) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +type RequestBodiesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedRequestBodyOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *RequestBodiesOrReferences) Reset() { + *x = RequestBodiesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBodiesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBodiesOrReferences) ProtoMessage() {} + +func (x *RequestBodiesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBodiesOrReferences.ProtoReflect.Descriptor instead. +func (*RequestBodiesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{56} +} + +func (x *RequestBodiesOrReferences) GetAdditionalProperties() []*NamedRequestBodyOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Describes a single request body. +type RequestBody struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Content *MediaTypes `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` + Required bool `protobuf:"varint,3,opt,name=required,proto3" json:"required,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *RequestBody) Reset() { + *x = RequestBody{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBody) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBody) ProtoMessage() {} + +func (x *RequestBody) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBody.ProtoReflect.Descriptor instead. 
+func (*RequestBody) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{57} +} + +func (x *RequestBody) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *RequestBody) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *RequestBody) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *RequestBody) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type RequestBodyOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *RequestBodyOrReference_RequestBody + // *RequestBodyOrReference_Reference + Oneof isRequestBodyOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *RequestBodyOrReference) Reset() { + *x = RequestBodyOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBodyOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBodyOrReference) ProtoMessage() {} + +func (x *RequestBodyOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBodyOrReference.ProtoReflect.Descriptor instead. +func (*RequestBodyOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{58} +} + +func (m *RequestBodyOrReference) GetOneof() isRequestBodyOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *RequestBodyOrReference) GetRequestBody() *RequestBody { + if x, ok := x.GetOneof().(*RequestBodyOrReference_RequestBody); ok { + return x.RequestBody + } + return nil +} + +func (x *RequestBodyOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*RequestBodyOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isRequestBodyOrReference_Oneof interface { + isRequestBodyOrReference_Oneof() +} + +type RequestBodyOrReference_RequestBody struct { + RequestBody *RequestBody `protobuf:"bytes,1,opt,name=request_body,json=requestBody,proto3,oneof"` +} + +type RequestBodyOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*RequestBodyOrReference_RequestBody) isRequestBodyOrReference_Oneof() {} + +func (*RequestBodyOrReference_Reference) isRequestBodyOrReference_Oneof() {} + +// Describes a single response from an API Operation, including design-time, static `links` to operations based on the response. 
+type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Headers *HeadersOrReferences `protobuf:"bytes,2,opt,name=headers,proto3" json:"headers,omitempty"` + Content *MediaTypes `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` + Links *LinksOrReferences `protobuf:"bytes,4,opt,name=links,proto3" json:"links,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,5,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{59} +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Response) GetHeaders() *HeadersOrReferences { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Response) GetContent() *MediaTypes { + if x != nil { + return x.Content + } + return nil +} + +func (x *Response) GetLinks() *LinksOrReferences { + if x != nil { + return x.Links + } + return nil +} + +func (x *Response) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ResponseOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ResponseOrReference_Response + // *ResponseOrReference_Reference + Oneof isResponseOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ResponseOrReference) Reset() { + *x = ResponseOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseOrReference) ProtoMessage() {} + +func (x *ResponseOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseOrReference.ProtoReflect.Descriptor instead. 
+func (*ResponseOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{60} +} + +func (m *ResponseOrReference) GetOneof() isResponseOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ResponseOrReference) GetResponse() *Response { + if x, ok := x.GetOneof().(*ResponseOrReference_Response); ok { + return x.Response + } + return nil +} + +func (x *ResponseOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*ResponseOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isResponseOrReference_Oneof interface { + isResponseOrReference_Oneof() +} + +type ResponseOrReference_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` +} + +type ResponseOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*ResponseOrReference_Response) isResponseOrReference_Oneof() {} + +func (*ResponseOrReference_Reference) isResponseOrReference_Oneof() {} + +// A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call. +type Responses struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Default *ResponseOrReference `protobuf:"bytes,1,opt,name=default,proto3" json:"default,omitempty"` + ResponseOrReference []*NamedResponseOrReference `protobuf:"bytes,2,rep,name=response_or_reference,json=responseOrReference,proto3" json:"response_or_reference,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,3,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Responses) Reset() { + *x = Responses{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Responses) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Responses) ProtoMessage() {} + +func (x *Responses) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Responses.ProtoReflect.Descriptor instead. 
+func (*Responses) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{61} +} + +func (x *Responses) GetDefault() *ResponseOrReference { + if x != nil { + return x.Default + } + return nil +} + +func (x *Responses) GetResponseOrReference() []*NamedResponseOrReference { + if x != nil { + return x.ResponseOrReference + } + return nil +} + +func (x *Responses) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ResponsesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedResponseOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ResponsesOrReferences) Reset() { + *x = ResponsesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponsesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponsesOrReferences) ProtoMessage() {} + +func (x *ResponsesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponsesOrReferences.ProtoReflect.Descriptor instead. +func (*ResponsesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{62} +} + +func (x *ResponsesOrReferences) GetAdditionalProperties() []*NamedResponseOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema. 
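+// For illustration only (not part of the generated code): a minimal sketch of
+// wrapping a simple schema in the SchemaOrReference oneof defined later in this
+// file, assuming the generated Go package name openapi_v3 used by gnostic:
+//
+//	s := &openapi_v3.SchemaOrReference{
+//		Oneof: &openapi_v3.SchemaOrReference_Schema{
+//			Schema: &openapi_v3.Schema{Type: "string", Format: "date-time"},
+//		},
+//	}
+//
+// Note that the numeric and boolean keywords below are plain proto3 scalars, so
+// a zero value (e.g. Maximum == 0) cannot be distinguished from an unset field.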
+type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nullable bool `protobuf:"varint,1,opt,name=nullable,proto3" json:"nullable,omitempty"` + Discriminator *Discriminator `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + WriteOnly bool `protobuf:"varint,4,opt,name=write_only,json=writeOnly,proto3" json:"write_only,omitempty"` + Xml *Xml `protobuf:"bytes,5,opt,name=xml,proto3" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"` + Deprecated bool `protobuf:"varint,8,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Title string `protobuf:"bytes,9,opt,name=title,proto3" json:"title,omitempty"` + MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,21,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,22,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,23,rep,name=required,proto3" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,24,rep,name=enum,proto3" json:"enum,omitempty"` + Type string `protobuf:"bytes,25,opt,name=type,proto3" json:"type,omitempty"` + AllOf []*SchemaOrReference `protobuf:"bytes,26,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` + OneOf []*SchemaOrReference `protobuf:"bytes,27,rep,name=one_of,json=oneOf,proto3" json:"one_of,omitempty"` + AnyOf []*SchemaOrReference `protobuf:"bytes,28,rep,name=any_of,json=anyOf,proto3" json:"any_of,omitempty"` + Not *Schema `protobuf:"bytes,29,opt,name=not,proto3" json:"not,omitempty"` + Items *ItemsItem `protobuf:"bytes,30,opt,name=items,proto3" json:"items,omitempty"` + Properties *Properties `protobuf:"bytes,31,opt,name=properties,proto3" json:"properties,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,32,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + Default *DefaultType 
`protobuf:"bytes,33,opt,name=default,proto3" json:"default,omitempty"` + Description string `protobuf:"bytes,34,opt,name=description,proto3" json:"description,omitempty"` + Format string `protobuf:"bytes,35,opt,name=format,proto3" json:"format,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,36,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. +func (*Schema) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{63} +} + +func (x *Schema) GetNullable() bool { + if x != nil { + return x.Nullable + } + return false +} + +func (x *Schema) GetDiscriminator() *Discriminator { + if x != nil { + return x.Discriminator + } + return nil +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *Schema) GetWriteOnly() bool { + if x != nil { + return x.WriteOnly + } + return false +} + +func (x *Schema) GetXml() *Xml { + if x != nil { + return x.Xml + } + return nil +} + +func (x *Schema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Schema) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Schema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Schema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *Schema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *Schema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *Schema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *Schema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *Schema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *Schema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *Schema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Schema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *Schema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *Schema) GetMaxProperties() int64 { + if x != nil { + return x.MaxProperties + } + return 0 +} + +func (x *Schema) GetMinProperties() int64 { + if x != nil { + return x.MinProperties + } + return 0 +} + +func (x *Schema) GetRequired() []string { + if x != nil { 
+ return x.Required + } + return nil +} + +func (x *Schema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *Schema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Schema) GetAllOf() []*SchemaOrReference { + if x != nil { + return x.AllOf + } + return nil +} + +func (x *Schema) GetOneOf() []*SchemaOrReference { + if x != nil { + return x.OneOf + } + return nil +} + +func (x *Schema) GetAnyOf() []*SchemaOrReference { + if x != nil { + return x.AnyOf + } + return nil +} + +func (x *Schema) GetNot() *Schema { + if x != nil { + return x.Not + } + return nil +} + +func (x *Schema) GetItems() *ItemsItem { + if x != nil { + return x.Items + } + return nil +} + +func (x *Schema) GetProperties() *Properties { + if x != nil { + return x.Properties + } + return nil +} + +func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +func (x *Schema) GetDefault() *DefaultType { + if x != nil { + return x.Default + } + return nil +} + +func (x *Schema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Schema) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type SchemaOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SchemaOrReference_Schema + // *SchemaOrReference_Reference + Oneof isSchemaOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SchemaOrReference) Reset() { + *x = SchemaOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemaOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaOrReference) ProtoMessage() {} + +func (x *SchemaOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemaOrReference.ProtoReflect.Descriptor instead. 
+func (*SchemaOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{64} +} + +func (m *SchemaOrReference) GetOneof() isSchemaOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SchemaOrReference) GetSchema() *Schema { + if x, ok := x.GetOneof().(*SchemaOrReference_Schema); ok { + return x.Schema + } + return nil +} + +func (x *SchemaOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*SchemaOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isSchemaOrReference_Oneof interface { + isSchemaOrReference_Oneof() +} + +type SchemaOrReference_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type SchemaOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*SchemaOrReference_Schema) isSchemaOrReference_Oneof() {} + +func (*SchemaOrReference_Reference) isSchemaOrReference_Oneof() {} + +type SchemasOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchemaOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SchemasOrReferences) Reset() { + *x = SchemasOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemasOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemasOrReferences) ProtoMessage() {} + +func (x *SchemasOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemasOrReferences.ProtoReflect.Descriptor instead. +func (*SchemasOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{65} +} + +func (x *SchemasOrReferences) GetAdditionalProperties() []*NamedSchemaOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request. 
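+// For illustration only (not part of the generated code): a single requirement
+// mapping a scheme name to its scopes, assuming the package name openapi_v3 and
+// the Name/Value fields of the NamedStringArray wrapper defined earlier in this
+// file:
+//
+//	req := &openapi_v3.SecurityRequirement{
+//		AdditionalProperties: []*openapi_v3.NamedStringArray{{
+//			Name:  "petstore_auth",
+//			Value: &openapi_v3.StringArray{Value: []string{"read:pets"}},
+//		}},
+//	}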
+type SecurityRequirement struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+}
+
+func (x *SecurityRequirement) Reset() {
+	*x = SecurityRequirement{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SecurityRequirement) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+	mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[66]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead.
+func (*SecurityRequirement) Descriptor() ([]byte, []int) {
+	return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{66}
+}
+
+func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray {
+	if x != nil {
+		return x.AdditionalProperties
+	}
+	return nil
+}
+
+// Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated by OAuth 2.0 Security Best Current Practice. Recommended for most use cases is the Authorization Code Grant flow with PKCE.
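+// For illustration only (not part of the generated code): an HTTP bearer-token
+// scheme expressed with the fields below (package name openapi_v3 assumed):
+//
+//	scheme := &openapi_v3.SecurityScheme{
+//		Type:         "http",
+//		Scheme:       "bearer",
+//		BearerFormat: "JWT",
+//	}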
+type SecurityScheme struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,4,opt,name=in,proto3" json:"in,omitempty"` + Scheme string `protobuf:"bytes,5,opt,name=scheme,proto3" json:"scheme,omitempty"` + BearerFormat string `protobuf:"bytes,6,opt,name=bearer_format,json=bearerFormat,proto3" json:"bearer_format,omitempty"` + Flows *OauthFlows `protobuf:"bytes,7,opt,name=flows,proto3" json:"flows,omitempty"` + OpenIdConnectUrl string `protobuf:"bytes,8,opt,name=open_id_connect_url,json=openIdConnectUrl,proto3" json:"open_id_connect_url,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,9,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *SecurityScheme) Reset() { + *x = SecurityScheme{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityScheme) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityScheme) ProtoMessage() {} + +func (x *SecurityScheme) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityScheme.ProtoReflect.Descriptor instead. 
+func (*SecurityScheme) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{67} +} + +func (x *SecurityScheme) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *SecurityScheme) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SecurityScheme) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SecurityScheme) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *SecurityScheme) GetScheme() string { + if x != nil { + return x.Scheme + } + return "" +} + +func (x *SecurityScheme) GetBearerFormat() string { + if x != nil { + return x.BearerFormat + } + return "" +} + +func (x *SecurityScheme) GetFlows() *OauthFlows { + if x != nil { + return x.Flows + } + return nil +} + +func (x *SecurityScheme) GetOpenIdConnectUrl() string { + if x != nil { + return x.OpenIdConnectUrl + } + return "" +} + +func (x *SecurityScheme) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type SecuritySchemeOrReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SecuritySchemeOrReference_SecurityScheme + // *SecuritySchemeOrReference_Reference + Oneof isSecuritySchemeOrReference_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SecuritySchemeOrReference) Reset() { + *x = SecuritySchemeOrReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecuritySchemeOrReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecuritySchemeOrReference) ProtoMessage() {} + +func (x *SecuritySchemeOrReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecuritySchemeOrReference.ProtoReflect.Descriptor instead. 
+func (*SecuritySchemeOrReference) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{68} +} + +func (m *SecuritySchemeOrReference) GetOneof() isSecuritySchemeOrReference_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SecuritySchemeOrReference) GetSecurityScheme() *SecurityScheme { + if x, ok := x.GetOneof().(*SecuritySchemeOrReference_SecurityScheme); ok { + return x.SecurityScheme + } + return nil +} + +func (x *SecuritySchemeOrReference) GetReference() *Reference { + if x, ok := x.GetOneof().(*SecuritySchemeOrReference_Reference); ok { + return x.Reference + } + return nil +} + +type isSecuritySchemeOrReference_Oneof interface { + isSecuritySchemeOrReference_Oneof() +} + +type SecuritySchemeOrReference_SecurityScheme struct { + SecurityScheme *SecurityScheme `protobuf:"bytes,1,opt,name=security_scheme,json=securityScheme,proto3,oneof"` +} + +type SecuritySchemeOrReference_Reference struct { + Reference *Reference `protobuf:"bytes,2,opt,name=reference,proto3,oneof"` +} + +func (*SecuritySchemeOrReference_SecurityScheme) isSecuritySchemeOrReference_Oneof() {} + +func (*SecuritySchemeOrReference_Reference) isSecuritySchemeOrReference_Oneof() {} + +type SecuritySchemesOrReferences struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSecuritySchemeOrReference `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SecuritySchemesOrReferences) Reset() { + *x = SecuritySchemesOrReferences{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecuritySchemesOrReferences) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecuritySchemesOrReferences) ProtoMessage() {} + +func (x *SecuritySchemesOrReferences) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecuritySchemesOrReferences.ProtoReflect.Descriptor instead. +func (*SecuritySchemesOrReferences) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{69} +} + +func (x *SecuritySchemesOrReferences) GetAdditionalProperties() []*NamedSecuritySchemeOrReference { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// An object representing a Server. 
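+// For illustration only (not part of the generated code): a server entry with a
+// templated URL variable, assuming the package name openapi_v3 and the Name/Value
+// fields of the NamedServerVariable wrapper defined earlier in this file:
+//
+//	srv := &openapi_v3.Server{
+//		Url: "https://{region}.api.example.com/v1",
+//		Variables: &openapi_v3.ServerVariables{
+//			AdditionalProperties: []*openapi_v3.NamedServerVariable{{
+//				Name: "region",
+//				Value: &openapi_v3.ServerVariable{
+//					Default: "us-east-1",
+//					Enum:    []string{"us-east-1", "eu-west-1"},
+//				},
+//			}},
+//		},
+//	}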
+type Server struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Variables *ServerVariables `protobuf:"bytes,3,opt,name=variables,proto3" json:"variables,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Server) Reset() { + *x = Server{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Server) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Server) ProtoMessage() {} + +func (x *Server) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Server.ProtoReflect.Descriptor instead. +func (*Server) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{70} +} + +func (x *Server) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Server) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Server) GetVariables() *ServerVariables { + if x != nil { + return x.Variables + } + return nil +} + +func (x *Server) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// An object representing a Server Variable for server URL template substitution. +type ServerVariable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enum []string `protobuf:"bytes,1,rep,name=enum,proto3" json:"enum,omitempty"` + Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *ServerVariable) Reset() { + *x = ServerVariable{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerVariable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerVariable) ProtoMessage() {} + +func (x *ServerVariable) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerVariable.ProtoReflect.Descriptor instead. 
+func (*ServerVariable) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{71} +} + +func (x *ServerVariable) GetEnum() []string { + if x != nil { + return x.Enum + } + return nil +} + +func (x *ServerVariable) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *ServerVariable) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ServerVariable) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +type ServerVariables struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedServerVariable `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ServerVariables) Reset() { + *x = ServerVariables{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerVariables) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerVariables) ProtoMessage() {} + +func (x *ServerVariables) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerVariables.ProtoReflect.Descriptor instead. +func (*ServerVariables) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{72} +} + +func (x *ServerVariables) GetAdditionalProperties() []*NamedServerVariable { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Any property starting with x- is valid. +type SpecificationExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SpecificationExtension_Number + // *SpecificationExtension_Boolean + // *SpecificationExtension_String_ + Oneof isSpecificationExtension_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SpecificationExtension) Reset() { + *x = SpecificationExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpecificationExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpecificationExtension) ProtoMessage() {} + +func (x *SpecificationExtension) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpecificationExtension.ProtoReflect.Descriptor instead. 
+func (*SpecificationExtension) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{73} +} + +func (m *SpecificationExtension) GetOneof() isSpecificationExtension_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SpecificationExtension) GetNumber() float64 { + if x, ok := x.GetOneof().(*SpecificationExtension_Number); ok { + return x.Number + } + return 0 +} + +func (x *SpecificationExtension) GetBoolean() bool { + if x, ok := x.GetOneof().(*SpecificationExtension_Boolean); ok { + return x.Boolean + } + return false +} + +func (x *SpecificationExtension) GetString_() string { + if x, ok := x.GetOneof().(*SpecificationExtension_String_); ok { + return x.String_ + } + return "" +} + +type isSpecificationExtension_Oneof interface { + isSpecificationExtension_Oneof() +} + +type SpecificationExtension_Number struct { + Number float64 `protobuf:"fixed64,1,opt,name=number,proto3,oneof"` +} + +type SpecificationExtension_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +type SpecificationExtension_String_ struct { + String_ string `protobuf:"bytes,3,opt,name=string,proto3,oneof"` +} + +func (*SpecificationExtension_Number) isSpecificationExtension_Oneof() {} + +func (*SpecificationExtension_Boolean) isSpecificationExtension_Oneof() {} + +func (*SpecificationExtension_String_) isSpecificationExtension_Oneof() {} + +type StringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` +} + +func (x *StringArray) Reset() { + *x = StringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringArray) ProtoMessage() {} + +func (x *StringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. 
+func (*StringArray) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{74} +} + +func (x *StringArray) GetValue() []string { + if x != nil { + return x.Value + } + return nil +} + +type Strings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Strings) Reset() { + *x = Strings{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Strings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Strings) ProtoMessage() {} + +func (x *Strings) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Strings.ProtoReflect.Descriptor instead. +func (*Strings) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{75} +} + +func (x *Strings) GetAdditionalProperties() []*NamedString { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,4,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Tag) Reset() { + *x = Tag{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
+func (*Tag) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{76} +} + +func (x *Tag) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Tag) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +// A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior. +type Xml struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + SpecificationExtension []*NamedAny `protobuf:"bytes,6,rep,name=specification_extension,json=specificationExtension,proto3" json:"specification_extension,omitempty"` +} + +func (x *Xml) Reset() { + *x = Xml{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Xml) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Xml) ProtoMessage() {} + +func (x *Xml) ProtoReflect() protoreflect.Message { + mi := &file_openapiv3_OpenAPIv3_proto_msgTypes[77] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Xml.ProtoReflect.Descriptor instead. 
+func (*Xml) Descriptor() ([]byte, []int) { + return file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{77} +} + +func (x *Xml) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Xml) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Xml) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *Xml) GetAttribute() bool { + if x != nil { + return x.Attribute + } + return false +} + +func (x *Xml) GetWrapped() bool { + if x != nil { + return x.Wrapped + } + return false +} + +func (x *Xml) GetSpecificationExtension() []*NamedAny { + if x != nil { + return x.SpecificationExtension + } + return nil +} + +var File_openapiv3_OpenAPIv3_proto protoreflect.FileDescriptor + +var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, + 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x90, 0x01, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, + 0x4f, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x11, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x1a, 0x0a, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0x79, 0x0a, 0x0f, + 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x23, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, + 0x03, 0x61, 0x6e, 0x79, 0x12, 0x38, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x07, + 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x43, 0x61, 0x6c, 0x6c, + 0x62, 0x61, 0x63, 0x6b, 0x12, 0x2d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x13, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x63, 0x61, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, + 0x63, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x35, + 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x72, + 0x0a, 0x15, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x22, 0xac, 0x05, 0x0a, 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x39, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x3f, 0x0a, 0x09, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x42, 0x0a, + 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 
0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, + 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, 0x64, 0x69, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x69, + 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0d, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0f, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x05, + 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x75, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x66, 0x0a, 0x0b, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x1a, 0x0a, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x18, 0x0a, + 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x06, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x22, 0xb2, 0x01, 0x0a, 0x0d, 0x44, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x07, 0x6d, + 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xc9, 0x03, 0x0a, 0x08, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x12, 0x24, 0x0a, 0x04, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, + 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 
0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x23, + 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, + 0x63, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x8e, 0x02, 0x0a, 0x08, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x39, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x79, + 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x5b, 0x0a, 0x09, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, + 0xe2, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 
0x18, 0x0a, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x35, 0x0a, 0x09, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x70, 0x0a, 0x14, + 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x57, + 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x0c, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x17, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8a, 0x04, 0x0a, 0x06, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, + 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x11, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, + 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x09, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x6e, 0x0a, 0x13, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc9, 0x02, 0x0a, + 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, + 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, + 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5a, 0x0a, 0x09, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x4d, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x22, 0x7e, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xe8, 0x02, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x23, 0x0a, + 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x66, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x12, 0x3e, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, + 0x64, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x4f, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x79, 0x0a, 0x0f, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, + 0x6e, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x6a, 0x0a, 0x11, 0x4c, 0x69, + 0x6e, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xad, 0x02, 0x0a, 0x09, 0x4d, 0x65, 0x64, 0x69, 0x61, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, 0x0a, 0x07, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x08, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, + 
0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x0a, 0x0a, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x45, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x65, 0x0a, 0x18, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x4f, 0x72, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x17, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x45, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x61, 0x0a, 0x16, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5d, 0x0a, 0x14, + 0x4e, 0x61, 0x6d, 0x65, 
0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, + 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x67, + 0x0a, 0x19, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, + 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x6b, 0x0a, 0x1b, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, + 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x65, 0x0a, 0x18, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x61, 0x0a, 0x16, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 
0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x71, 0x0a, 0x1e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, + 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x5b, 0x0a, 0x13, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x37, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0xf2, 0x01, 0x0a, 0x09, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 
0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xcd, 0x02, 0x0a, 0x0a, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, + 0x6f, 0x77, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x08, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x44, 0x0a, 0x12, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x11, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, + 0x44, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, + 0x6f, 0x77, 0x52, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x53, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x49, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x05, 0x0a, 0x09, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, + 
0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x45, 0x0a, 0x0c, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x33, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x09, 0x63, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, + 0x72, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, + 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x33, 0x2e, 0x4e, 0x61, 
0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xb1, 0x04, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x74, 0x79, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x70, 0x6c, 0x6f, 0x64, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x29, 0x0a, 0x07, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x4f, + 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x08, 0x65, 0x78, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8d, 0x01, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, + 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x16, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, + 0x12, 0x5a, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xfa, 0x04, 0x0a, + 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, + 0x74, 0x12, 0x27, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, + 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, + 0x12, 0x2b, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2b, 0x0a, + 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0x0a, 0x05, 0x50, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x65, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x57, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, + 
0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x79, 0x0a, 0x19, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, + 0x6f, 0x64, 0x69, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x73, 0x12, 0x5c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, + 0xcc, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x96, + 0x01, 0x0a, 0x16, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, + 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 
0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4f, 0x72, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x30, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, + 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x22, 0xef, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x58, 0x0a, 0x15, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x52, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x4d, 
0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x59, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xaf, 0x0b, 0x0a, 0x06, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x75, 0x6c, 0x6c, 0x61, 0x62, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x75, 0x6c, 0x6c, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x3f, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, + 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1d, + 0x0a, 0x0a, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, + 0x03, 0x78, 0x6d, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, + 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, + 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, + 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, + 0x74, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 
0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, + 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x61, + 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, + 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x17, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x23, + 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, + 0x66, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, 0x66, 0x12, 0x34, 0x0a, + 0x06, 0x6f, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 
0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x6f, 0x6e, + 0x65, 0x4f, 0x66, 0x12, 0x34, 0x0a, 0x06, 0x61, 0x6e, 0x79, 0x5f, 0x6f, 0x66, 0x18, 0x1c, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x52, 0x05, 0x61, 0x6e, 0x79, 0x4f, 0x66, 0x12, 0x24, 0x0a, 0x03, 0x6e, 0x6f, 0x74, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x03, 0x6e, 0x6f, 0x74, 0x12, + 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x36, 0x0a, 0x0a, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x20, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, + 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x31, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x23, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4d, 0x0a, 0x17, + 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x11, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 
0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, + 0x6e, 0x0a, 0x13, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x72, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, + 0x68, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xd3, 0x02, 0x0a, 0x0e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x2c, 0x0a, 0x05, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x4f, 0x61, 0x75, 0x74, 0x68, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x6f, 0x70, 0x65, 0x6e, 0x49, 0x64, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, + 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 
0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0xa2, 0x01, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x45, 0x0a, + 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, + 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x65, 0x4f, 0x72, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x14, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x52, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x4d, + 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x01, + 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 
0x6c, 0x74, 0x12, 0x20, + 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x67, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x54, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x07, + 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x57, 0x0a, 0x07, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc9, 0x01, 0x0a, 0x03, 0x54, 0x61, + 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 
0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xd6, 0x01, 0x0a, 0x03, 0x58, 0x6d, 0x6c, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x12, + 0x4d, 0x0a, 0x17, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, + 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, + 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_openapiv3_OpenAPIv3_proto_rawDescOnce sync.Once + file_openapiv3_OpenAPIv3_proto_rawDescData = file_openapiv3_OpenAPIv3_proto_rawDesc +) + +func file_openapiv3_OpenAPIv3_proto_rawDescGZIP() []byte { + file_openapiv3_OpenAPIv3_proto_rawDescOnce.Do(func() { + file_openapiv3_OpenAPIv3_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv3_OpenAPIv3_proto_rawDescData) + }) + return file_openapiv3_OpenAPIv3_proto_rawDescData +} + +var file_openapiv3_OpenAPIv3_proto_msgTypes = make([]protoimpl.MessageInfo, 78) +var file_openapiv3_OpenAPIv3_proto_goTypes = []interface{}{ + (*AdditionalPropertiesItem)(nil), // 0: openapi.v3.AdditionalPropertiesItem + (*Any)(nil), // 1: openapi.v3.Any + (*AnyOrExpression)(nil), // 2: openapi.v3.AnyOrExpression + (*Callback)(nil), // 3: openapi.v3.Callback + (*CallbackOrReference)(nil), // 4: openapi.v3.CallbackOrReference + (*CallbacksOrReferences)(nil), // 5: openapi.v3.CallbacksOrReferences + (*Components)(nil), // 6: openapi.v3.Components + (*Contact)(nil), // 7: openapi.v3.Contact + (*DefaultType)(nil), // 8: openapi.v3.DefaultType + (*Discriminator)(nil), // 9: openapi.v3.Discriminator + (*Document)(nil), // 10: openapi.v3.Document + (*Encoding)(nil), // 11: openapi.v3.Encoding + 
(*Encodings)(nil), // 12: openapi.v3.Encodings + (*Example)(nil), // 13: openapi.v3.Example + (*ExampleOrReference)(nil), // 14: openapi.v3.ExampleOrReference + (*ExamplesOrReferences)(nil), // 15: openapi.v3.ExamplesOrReferences + (*Expression)(nil), // 16: openapi.v3.Expression + (*ExternalDocs)(nil), // 17: openapi.v3.ExternalDocs + (*Header)(nil), // 18: openapi.v3.Header + (*HeaderOrReference)(nil), // 19: openapi.v3.HeaderOrReference + (*HeadersOrReferences)(nil), // 20: openapi.v3.HeadersOrReferences + (*Info)(nil), // 21: openapi.v3.Info + (*ItemsItem)(nil), // 22: openapi.v3.ItemsItem + (*License)(nil), // 23: openapi.v3.License + (*Link)(nil), // 24: openapi.v3.Link + (*LinkOrReference)(nil), // 25: openapi.v3.LinkOrReference + (*LinksOrReferences)(nil), // 26: openapi.v3.LinksOrReferences + (*MediaType)(nil), // 27: openapi.v3.MediaType + (*MediaTypes)(nil), // 28: openapi.v3.MediaTypes + (*NamedAny)(nil), // 29: openapi.v3.NamedAny + (*NamedCallbackOrReference)(nil), // 30: openapi.v3.NamedCallbackOrReference + (*NamedEncoding)(nil), // 31: openapi.v3.NamedEncoding + (*NamedExampleOrReference)(nil), // 32: openapi.v3.NamedExampleOrReference + (*NamedHeaderOrReference)(nil), // 33: openapi.v3.NamedHeaderOrReference + (*NamedLinkOrReference)(nil), // 34: openapi.v3.NamedLinkOrReference + (*NamedMediaType)(nil), // 35: openapi.v3.NamedMediaType + (*NamedParameterOrReference)(nil), // 36: openapi.v3.NamedParameterOrReference + (*NamedPathItem)(nil), // 37: openapi.v3.NamedPathItem + (*NamedRequestBodyOrReference)(nil), // 38: openapi.v3.NamedRequestBodyOrReference + (*NamedResponseOrReference)(nil), // 39: openapi.v3.NamedResponseOrReference + (*NamedSchemaOrReference)(nil), // 40: openapi.v3.NamedSchemaOrReference + (*NamedSecuritySchemeOrReference)(nil), // 41: openapi.v3.NamedSecuritySchemeOrReference + (*NamedServerVariable)(nil), // 42: openapi.v3.NamedServerVariable + (*NamedString)(nil), // 43: openapi.v3.NamedString + (*NamedStringArray)(nil), // 44: openapi.v3.NamedStringArray + (*OauthFlow)(nil), // 45: openapi.v3.OauthFlow + (*OauthFlows)(nil), // 46: openapi.v3.OauthFlows + (*Object)(nil), // 47: openapi.v3.Object + (*Operation)(nil), // 48: openapi.v3.Operation + (*Parameter)(nil), // 49: openapi.v3.Parameter + (*ParameterOrReference)(nil), // 50: openapi.v3.ParameterOrReference + (*ParametersOrReferences)(nil), // 51: openapi.v3.ParametersOrReferences + (*PathItem)(nil), // 52: openapi.v3.PathItem + (*Paths)(nil), // 53: openapi.v3.Paths + (*Properties)(nil), // 54: openapi.v3.Properties + (*Reference)(nil), // 55: openapi.v3.Reference + (*RequestBodiesOrReferences)(nil), // 56: openapi.v3.RequestBodiesOrReferences + (*RequestBody)(nil), // 57: openapi.v3.RequestBody + (*RequestBodyOrReference)(nil), // 58: openapi.v3.RequestBodyOrReference + (*Response)(nil), // 59: openapi.v3.Response + (*ResponseOrReference)(nil), // 60: openapi.v3.ResponseOrReference + (*Responses)(nil), // 61: openapi.v3.Responses + (*ResponsesOrReferences)(nil), // 62: openapi.v3.ResponsesOrReferences + (*Schema)(nil), // 63: openapi.v3.Schema + (*SchemaOrReference)(nil), // 64: openapi.v3.SchemaOrReference + (*SchemasOrReferences)(nil), // 65: openapi.v3.SchemasOrReferences + (*SecurityRequirement)(nil), // 66: openapi.v3.SecurityRequirement + (*SecurityScheme)(nil), // 67: openapi.v3.SecurityScheme + (*SecuritySchemeOrReference)(nil), // 68: openapi.v3.SecuritySchemeOrReference + (*SecuritySchemesOrReferences)(nil), // 69: openapi.v3.SecuritySchemesOrReferences + (*Server)(nil), // 70: 
openapi.v3.Server + (*ServerVariable)(nil), // 71: openapi.v3.ServerVariable + (*ServerVariables)(nil), // 72: openapi.v3.ServerVariables + (*SpecificationExtension)(nil), // 73: openapi.v3.SpecificationExtension + (*StringArray)(nil), // 74: openapi.v3.StringArray + (*Strings)(nil), // 75: openapi.v3.Strings + (*Tag)(nil), // 76: openapi.v3.Tag + (*Xml)(nil), // 77: openapi.v3.Xml + (*anypb.Any)(nil), // 78: google.protobuf.Any +} +var file_openapiv3_OpenAPIv3_proto_depIdxs = []int32{ + 64, // 0: openapi.v3.AdditionalPropertiesItem.schema_or_reference:type_name -> openapi.v3.SchemaOrReference + 78, // 1: openapi.v3.Any.value:type_name -> google.protobuf.Any + 1, // 2: openapi.v3.AnyOrExpression.any:type_name -> openapi.v3.Any + 16, // 3: openapi.v3.AnyOrExpression.expression:type_name -> openapi.v3.Expression + 37, // 4: openapi.v3.Callback.path:type_name -> openapi.v3.NamedPathItem + 29, // 5: openapi.v3.Callback.specification_extension:type_name -> openapi.v3.NamedAny + 3, // 6: openapi.v3.CallbackOrReference.callback:type_name -> openapi.v3.Callback + 55, // 7: openapi.v3.CallbackOrReference.reference:type_name -> openapi.v3.Reference + 30, // 8: openapi.v3.CallbacksOrReferences.additional_properties:type_name -> openapi.v3.NamedCallbackOrReference + 65, // 9: openapi.v3.Components.schemas:type_name -> openapi.v3.SchemasOrReferences + 62, // 10: openapi.v3.Components.responses:type_name -> openapi.v3.ResponsesOrReferences + 51, // 11: openapi.v3.Components.parameters:type_name -> openapi.v3.ParametersOrReferences + 15, // 12: openapi.v3.Components.examples:type_name -> openapi.v3.ExamplesOrReferences + 56, // 13: openapi.v3.Components.request_bodies:type_name -> openapi.v3.RequestBodiesOrReferences + 20, // 14: openapi.v3.Components.headers:type_name -> openapi.v3.HeadersOrReferences + 69, // 15: openapi.v3.Components.security_schemes:type_name -> openapi.v3.SecuritySchemesOrReferences + 26, // 16: openapi.v3.Components.links:type_name -> openapi.v3.LinksOrReferences + 5, // 17: openapi.v3.Components.callbacks:type_name -> openapi.v3.CallbacksOrReferences + 29, // 18: openapi.v3.Components.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 19: openapi.v3.Contact.specification_extension:type_name -> openapi.v3.NamedAny + 75, // 20: openapi.v3.Discriminator.mapping:type_name -> openapi.v3.Strings + 29, // 21: openapi.v3.Discriminator.specification_extension:type_name -> openapi.v3.NamedAny + 21, // 22: openapi.v3.Document.info:type_name -> openapi.v3.Info + 70, // 23: openapi.v3.Document.servers:type_name -> openapi.v3.Server + 53, // 24: openapi.v3.Document.paths:type_name -> openapi.v3.Paths + 6, // 25: openapi.v3.Document.components:type_name -> openapi.v3.Components + 66, // 26: openapi.v3.Document.security:type_name -> openapi.v3.SecurityRequirement + 76, // 27: openapi.v3.Document.tags:type_name -> openapi.v3.Tag + 17, // 28: openapi.v3.Document.external_docs:type_name -> openapi.v3.ExternalDocs + 29, // 29: openapi.v3.Document.specification_extension:type_name -> openapi.v3.NamedAny + 20, // 30: openapi.v3.Encoding.headers:type_name -> openapi.v3.HeadersOrReferences + 29, // 31: openapi.v3.Encoding.specification_extension:type_name -> openapi.v3.NamedAny + 31, // 32: openapi.v3.Encodings.additional_properties:type_name -> openapi.v3.NamedEncoding + 1, // 33: openapi.v3.Example.value:type_name -> openapi.v3.Any + 29, // 34: openapi.v3.Example.specification_extension:type_name -> openapi.v3.NamedAny + 13, // 35: openapi.v3.ExampleOrReference.example:type_name -> 
openapi.v3.Example + 55, // 36: openapi.v3.ExampleOrReference.reference:type_name -> openapi.v3.Reference + 32, // 37: openapi.v3.ExamplesOrReferences.additional_properties:type_name -> openapi.v3.NamedExampleOrReference + 29, // 38: openapi.v3.Expression.additional_properties:type_name -> openapi.v3.NamedAny + 29, // 39: openapi.v3.ExternalDocs.specification_extension:type_name -> openapi.v3.NamedAny + 64, // 40: openapi.v3.Header.schema:type_name -> openapi.v3.SchemaOrReference + 1, // 41: openapi.v3.Header.example:type_name -> openapi.v3.Any + 15, // 42: openapi.v3.Header.examples:type_name -> openapi.v3.ExamplesOrReferences + 28, // 43: openapi.v3.Header.content:type_name -> openapi.v3.MediaTypes + 29, // 44: openapi.v3.Header.specification_extension:type_name -> openapi.v3.NamedAny + 18, // 45: openapi.v3.HeaderOrReference.header:type_name -> openapi.v3.Header + 55, // 46: openapi.v3.HeaderOrReference.reference:type_name -> openapi.v3.Reference + 33, // 47: openapi.v3.HeadersOrReferences.additional_properties:type_name -> openapi.v3.NamedHeaderOrReference + 7, // 48: openapi.v3.Info.contact:type_name -> openapi.v3.Contact + 23, // 49: openapi.v3.Info.license:type_name -> openapi.v3.License + 29, // 50: openapi.v3.Info.specification_extension:type_name -> openapi.v3.NamedAny + 64, // 51: openapi.v3.ItemsItem.schema_or_reference:type_name -> openapi.v3.SchemaOrReference + 29, // 52: openapi.v3.License.specification_extension:type_name -> openapi.v3.NamedAny + 2, // 53: openapi.v3.Link.parameters:type_name -> openapi.v3.AnyOrExpression + 2, // 54: openapi.v3.Link.request_body:type_name -> openapi.v3.AnyOrExpression + 70, // 55: openapi.v3.Link.server:type_name -> openapi.v3.Server + 29, // 56: openapi.v3.Link.specification_extension:type_name -> openapi.v3.NamedAny + 24, // 57: openapi.v3.LinkOrReference.link:type_name -> openapi.v3.Link + 55, // 58: openapi.v3.LinkOrReference.reference:type_name -> openapi.v3.Reference + 34, // 59: openapi.v3.LinksOrReferences.additional_properties:type_name -> openapi.v3.NamedLinkOrReference + 64, // 60: openapi.v3.MediaType.schema:type_name -> openapi.v3.SchemaOrReference + 1, // 61: openapi.v3.MediaType.example:type_name -> openapi.v3.Any + 15, // 62: openapi.v3.MediaType.examples:type_name -> openapi.v3.ExamplesOrReferences + 12, // 63: openapi.v3.MediaType.encoding:type_name -> openapi.v3.Encodings + 29, // 64: openapi.v3.MediaType.specification_extension:type_name -> openapi.v3.NamedAny + 35, // 65: openapi.v3.MediaTypes.additional_properties:type_name -> openapi.v3.NamedMediaType + 1, // 66: openapi.v3.NamedAny.value:type_name -> openapi.v3.Any + 4, // 67: openapi.v3.NamedCallbackOrReference.value:type_name -> openapi.v3.CallbackOrReference + 11, // 68: openapi.v3.NamedEncoding.value:type_name -> openapi.v3.Encoding + 14, // 69: openapi.v3.NamedExampleOrReference.value:type_name -> openapi.v3.ExampleOrReference + 19, // 70: openapi.v3.NamedHeaderOrReference.value:type_name -> openapi.v3.HeaderOrReference + 25, // 71: openapi.v3.NamedLinkOrReference.value:type_name -> openapi.v3.LinkOrReference + 27, // 72: openapi.v3.NamedMediaType.value:type_name -> openapi.v3.MediaType + 50, // 73: openapi.v3.NamedParameterOrReference.value:type_name -> openapi.v3.ParameterOrReference + 52, // 74: openapi.v3.NamedPathItem.value:type_name -> openapi.v3.PathItem + 58, // 75: openapi.v3.NamedRequestBodyOrReference.value:type_name -> openapi.v3.RequestBodyOrReference + 60, // 76: openapi.v3.NamedResponseOrReference.value:type_name -> openapi.v3.ResponseOrReference 
+ 64, // 77: openapi.v3.NamedSchemaOrReference.value:type_name -> openapi.v3.SchemaOrReference + 68, // 78: openapi.v3.NamedSecuritySchemeOrReference.value:type_name -> openapi.v3.SecuritySchemeOrReference + 71, // 79: openapi.v3.NamedServerVariable.value:type_name -> openapi.v3.ServerVariable + 74, // 80: openapi.v3.NamedStringArray.value:type_name -> openapi.v3.StringArray + 75, // 81: openapi.v3.OauthFlow.scopes:type_name -> openapi.v3.Strings + 29, // 82: openapi.v3.OauthFlow.specification_extension:type_name -> openapi.v3.NamedAny + 45, // 83: openapi.v3.OauthFlows.implicit:type_name -> openapi.v3.OauthFlow + 45, // 84: openapi.v3.OauthFlows.password:type_name -> openapi.v3.OauthFlow + 45, // 85: openapi.v3.OauthFlows.client_credentials:type_name -> openapi.v3.OauthFlow + 45, // 86: openapi.v3.OauthFlows.authorization_code:type_name -> openapi.v3.OauthFlow + 29, // 87: openapi.v3.OauthFlows.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 88: openapi.v3.Object.additional_properties:type_name -> openapi.v3.NamedAny + 17, // 89: openapi.v3.Operation.external_docs:type_name -> openapi.v3.ExternalDocs + 50, // 90: openapi.v3.Operation.parameters:type_name -> openapi.v3.ParameterOrReference + 58, // 91: openapi.v3.Operation.request_body:type_name -> openapi.v3.RequestBodyOrReference + 61, // 92: openapi.v3.Operation.responses:type_name -> openapi.v3.Responses + 5, // 93: openapi.v3.Operation.callbacks:type_name -> openapi.v3.CallbacksOrReferences + 66, // 94: openapi.v3.Operation.security:type_name -> openapi.v3.SecurityRequirement + 70, // 95: openapi.v3.Operation.servers:type_name -> openapi.v3.Server + 29, // 96: openapi.v3.Operation.specification_extension:type_name -> openapi.v3.NamedAny + 64, // 97: openapi.v3.Parameter.schema:type_name -> openapi.v3.SchemaOrReference + 1, // 98: openapi.v3.Parameter.example:type_name -> openapi.v3.Any + 15, // 99: openapi.v3.Parameter.examples:type_name -> openapi.v3.ExamplesOrReferences + 28, // 100: openapi.v3.Parameter.content:type_name -> openapi.v3.MediaTypes + 29, // 101: openapi.v3.Parameter.specification_extension:type_name -> openapi.v3.NamedAny + 49, // 102: openapi.v3.ParameterOrReference.parameter:type_name -> openapi.v3.Parameter + 55, // 103: openapi.v3.ParameterOrReference.reference:type_name -> openapi.v3.Reference + 36, // 104: openapi.v3.ParametersOrReferences.additional_properties:type_name -> openapi.v3.NamedParameterOrReference + 48, // 105: openapi.v3.PathItem.get:type_name -> openapi.v3.Operation + 48, // 106: openapi.v3.PathItem.put:type_name -> openapi.v3.Operation + 48, // 107: openapi.v3.PathItem.post:type_name -> openapi.v3.Operation + 48, // 108: openapi.v3.PathItem.delete:type_name -> openapi.v3.Operation + 48, // 109: openapi.v3.PathItem.options:type_name -> openapi.v3.Operation + 48, // 110: openapi.v3.PathItem.head:type_name -> openapi.v3.Operation + 48, // 111: openapi.v3.PathItem.patch:type_name -> openapi.v3.Operation + 48, // 112: openapi.v3.PathItem.trace:type_name -> openapi.v3.Operation + 70, // 113: openapi.v3.PathItem.servers:type_name -> openapi.v3.Server + 50, // 114: openapi.v3.PathItem.parameters:type_name -> openapi.v3.ParameterOrReference + 29, // 115: openapi.v3.PathItem.specification_extension:type_name -> openapi.v3.NamedAny + 37, // 116: openapi.v3.Paths.path:type_name -> openapi.v3.NamedPathItem + 29, // 117: openapi.v3.Paths.specification_extension:type_name -> openapi.v3.NamedAny + 40, // 118: openapi.v3.Properties.additional_properties:type_name -> 
openapi.v3.NamedSchemaOrReference + 38, // 119: openapi.v3.RequestBodiesOrReferences.additional_properties:type_name -> openapi.v3.NamedRequestBodyOrReference + 28, // 120: openapi.v3.RequestBody.content:type_name -> openapi.v3.MediaTypes + 29, // 121: openapi.v3.RequestBody.specification_extension:type_name -> openapi.v3.NamedAny + 57, // 122: openapi.v3.RequestBodyOrReference.request_body:type_name -> openapi.v3.RequestBody + 55, // 123: openapi.v3.RequestBodyOrReference.reference:type_name -> openapi.v3.Reference + 20, // 124: openapi.v3.Response.headers:type_name -> openapi.v3.HeadersOrReferences + 28, // 125: openapi.v3.Response.content:type_name -> openapi.v3.MediaTypes + 26, // 126: openapi.v3.Response.links:type_name -> openapi.v3.LinksOrReferences + 29, // 127: openapi.v3.Response.specification_extension:type_name -> openapi.v3.NamedAny + 59, // 128: openapi.v3.ResponseOrReference.response:type_name -> openapi.v3.Response + 55, // 129: openapi.v3.ResponseOrReference.reference:type_name -> openapi.v3.Reference + 60, // 130: openapi.v3.Responses.default:type_name -> openapi.v3.ResponseOrReference + 39, // 131: openapi.v3.Responses.response_or_reference:type_name -> openapi.v3.NamedResponseOrReference + 29, // 132: openapi.v3.Responses.specification_extension:type_name -> openapi.v3.NamedAny + 39, // 133: openapi.v3.ResponsesOrReferences.additional_properties:type_name -> openapi.v3.NamedResponseOrReference + 9, // 134: openapi.v3.Schema.discriminator:type_name -> openapi.v3.Discriminator + 77, // 135: openapi.v3.Schema.xml:type_name -> openapi.v3.Xml + 17, // 136: openapi.v3.Schema.external_docs:type_name -> openapi.v3.ExternalDocs + 1, // 137: openapi.v3.Schema.example:type_name -> openapi.v3.Any + 1, // 138: openapi.v3.Schema.enum:type_name -> openapi.v3.Any + 64, // 139: openapi.v3.Schema.all_of:type_name -> openapi.v3.SchemaOrReference + 64, // 140: openapi.v3.Schema.one_of:type_name -> openapi.v3.SchemaOrReference + 64, // 141: openapi.v3.Schema.any_of:type_name -> openapi.v3.SchemaOrReference + 63, // 142: openapi.v3.Schema.not:type_name -> openapi.v3.Schema + 22, // 143: openapi.v3.Schema.items:type_name -> openapi.v3.ItemsItem + 54, // 144: openapi.v3.Schema.properties:type_name -> openapi.v3.Properties + 0, // 145: openapi.v3.Schema.additional_properties:type_name -> openapi.v3.AdditionalPropertiesItem + 8, // 146: openapi.v3.Schema.default:type_name -> openapi.v3.DefaultType + 29, // 147: openapi.v3.Schema.specification_extension:type_name -> openapi.v3.NamedAny + 63, // 148: openapi.v3.SchemaOrReference.schema:type_name -> openapi.v3.Schema + 55, // 149: openapi.v3.SchemaOrReference.reference:type_name -> openapi.v3.Reference + 40, // 150: openapi.v3.SchemasOrReferences.additional_properties:type_name -> openapi.v3.NamedSchemaOrReference + 44, // 151: openapi.v3.SecurityRequirement.additional_properties:type_name -> openapi.v3.NamedStringArray + 46, // 152: openapi.v3.SecurityScheme.flows:type_name -> openapi.v3.OauthFlows + 29, // 153: openapi.v3.SecurityScheme.specification_extension:type_name -> openapi.v3.NamedAny + 67, // 154: openapi.v3.SecuritySchemeOrReference.security_scheme:type_name -> openapi.v3.SecurityScheme + 55, // 155: openapi.v3.SecuritySchemeOrReference.reference:type_name -> openapi.v3.Reference + 41, // 156: openapi.v3.SecuritySchemesOrReferences.additional_properties:type_name -> openapi.v3.NamedSecuritySchemeOrReference + 72, // 157: openapi.v3.Server.variables:type_name -> openapi.v3.ServerVariables + 29, // 158: 
openapi.v3.Server.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 159: openapi.v3.ServerVariable.specification_extension:type_name -> openapi.v3.NamedAny + 42, // 160: openapi.v3.ServerVariables.additional_properties:type_name -> openapi.v3.NamedServerVariable + 43, // 161: openapi.v3.Strings.additional_properties:type_name -> openapi.v3.NamedString + 17, // 162: openapi.v3.Tag.external_docs:type_name -> openapi.v3.ExternalDocs + 29, // 163: openapi.v3.Tag.specification_extension:type_name -> openapi.v3.NamedAny + 29, // 164: openapi.v3.Xml.specification_extension:type_name -> openapi.v3.NamedAny + 165, // [165:165] is the sub-list for method output_type + 165, // [165:165] is the sub-list for method input_type + 165, // [165:165] is the sub-list for extension type_name + 165, // [165:165] is the sub-list for extension extendee + 0, // [0:165] is the sub-list for field type_name +} + +func init() { file_openapiv3_OpenAPIv3_proto_init() } +func file_openapiv3_OpenAPIv3_proto_init() { + if File_openapiv3_OpenAPIv3_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_openapiv3_OpenAPIv3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalPropertiesItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyOrExpression); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Callback); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallbackOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallbacksOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Components); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Contact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DefaultType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[9].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*Discriminator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Encoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Encodings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Example); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExampleOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExamplesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expression); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalDocs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeadersOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*License); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinkOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LinksOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MediaType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MediaTypes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedCallbackOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedEncoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedExampleOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedHeaderOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedLinkOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedMediaType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[36].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*NamedParameterOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedPathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedRequestBodyOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponseOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSchemaOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSecuritySchemeOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedServerVariable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedStringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OauthFlow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OauthFlows); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Parameter); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParameterOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParametersOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Properties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBodiesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBody); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBodyOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Responses); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponsesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemasOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityScheme); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecuritySchemeOrReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecuritySchemesOrReferences); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Server); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerVariable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerVariables); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpecificationExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Strings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[76].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Xml); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_openapiv3_OpenAPIv3_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AdditionalPropertiesItem_SchemaOrReference)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*AnyOrExpression_Any)(nil), + (*AnyOrExpression_Expression)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*CallbackOrReference_Callback)(nil), + (*CallbackOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*DefaultType_Number)(nil), + (*DefaultType_Boolean)(nil), + (*DefaultType_String_)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*ExampleOrReference_Example)(nil), + (*ExampleOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*HeaderOrReference_Header)(nil), + (*HeaderOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[25].OneofWrappers = []interface{}{ + (*LinkOrReference_Link)(nil), + (*LinkOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[50].OneofWrappers = []interface{}{ + (*ParameterOrReference_Parameter)(nil), + (*ParameterOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[58].OneofWrappers = []interface{}{ + (*RequestBodyOrReference_RequestBody)(nil), + (*RequestBodyOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[60].OneofWrappers = []interface{}{ + (*ResponseOrReference_Response)(nil), + (*ResponseOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[64].OneofWrappers = []interface{}{ + (*SchemaOrReference_Schema)(nil), + (*SchemaOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[68].OneofWrappers = []interface{}{ + (*SecuritySchemeOrReference_SecurityScheme)(nil), + (*SecuritySchemeOrReference_Reference)(nil), + } + file_openapiv3_OpenAPIv3_proto_msgTypes[73].OneofWrappers = []interface{}{ + (*SpecificationExtension_Number)(nil), + (*SpecificationExtension_Boolean)(nil), + (*SpecificationExtension_String_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_OpenAPIv3_proto_rawDesc, + NumEnums: 0, + NumMessages: 78, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_openapiv3_OpenAPIv3_proto_goTypes, + DependencyIndexes: file_openapiv3_OpenAPIv3_proto_depIdxs, + MessageInfos: file_openapiv3_OpenAPIv3_proto_msgTypes, + }.Build() + File_openapiv3_OpenAPIv3_proto = out.File + file_openapiv3_OpenAPIv3_proto_rawDesc = nil + file_openapiv3_OpenAPIv3_proto_goTypes = nil + file_openapiv3_OpenAPIv3_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto new file mode 100644 index 000000000..1be335b89 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto @@ -0,0 +1,672 @@ +// 
Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v3; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v3"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. +option go_package = "./openapiv3;openapi_v3"; + +message AdditionalPropertiesItem { + oneof oneof { + SchemaOrReference schema_or_reference = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message AnyOrExpression { + oneof oneof { + Any any = 1; + Expression expression = 2; + } +} + +// A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation. +message Callback { + repeated NamedPathItem path = 1; + repeated NamedAny specification_extension = 2; +} + +message CallbackOrReference { + oneof oneof { + Callback callback = 1; + Reference reference = 2; + } +} + +message CallbacksOrReferences { + repeated NamedCallbackOrReference additional_properties = 1; +} + +// Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object. 
+message Components { + SchemasOrReferences schemas = 1; + ResponsesOrReferences responses = 2; + ParametersOrReferences parameters = 3; + ExamplesOrReferences examples = 4; + RequestBodiesOrReferences request_bodies = 5; + HeadersOrReferences headers = 6; + SecuritySchemesOrReferences security_schemes = 7; + LinksOrReferences links = 8; + CallbacksOrReferences callbacks = 9; + repeated NamedAny specification_extension = 10; +} + +// Contact information for the exposed API. +message Contact { + string name = 1; + string url = 2; + string email = 3; + repeated NamedAny specification_extension = 4; +} + +message DefaultType { + oneof oneof { + double number = 1; + bool boolean = 2; + string string = 3; + } +} + +// When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered. +message Discriminator { + string property_name = 1; + Strings mapping = 2; + repeated NamedAny specification_extension = 3; +} + +message Document { + string openapi = 1; + Info info = 2; + repeated Server servers = 3; + Paths paths = 4; + Components components = 5; + repeated SecurityRequirement security = 6; + repeated Tag tags = 7; + ExternalDocs external_docs = 8; + repeated NamedAny specification_extension = 9; +} + +// A single encoding definition applied to a single schema property. +message Encoding { + string content_type = 1; + HeadersOrReferences headers = 2; + string style = 3; + bool explode = 4; + bool allow_reserved = 5; + repeated NamedAny specification_extension = 6; +} + +message Encodings { + repeated NamedEncoding additional_properties = 1; +} + +message Example { + string summary = 1; + string description = 2; + Any value = 3; + string external_value = 4; + repeated NamedAny specification_extension = 5; +} + +message ExampleOrReference { + oneof oneof { + Example example = 1; + Reference reference = 2; + } +} + +message ExamplesOrReferences { + repeated NamedExampleOrReference additional_properties = 1; +} + +message Expression { + repeated NamedAny additional_properties = 1; +} + +// Allows referencing an external resource for extended documentation. +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny specification_extension = 3; +} + +// The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`). +message Header { + string description = 1; + bool required = 2; + bool deprecated = 3; + bool allow_empty_value = 4; + string style = 5; + bool explode = 6; + bool allow_reserved = 7; + SchemaOrReference schema = 8; + Any example = 9; + ExamplesOrReferences examples = 10; + MediaTypes content = 11; + repeated NamedAny specification_extension = 12; +} + +message HeaderOrReference { + oneof oneof { + Header header = 1; + Reference reference = 2; + } +} + +message HeadersOrReferences { + repeated NamedHeaderOrReference additional_properties = 1; +} + +// The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience. +message Info { + string title = 1; + string description = 2; + string terms_of_service = 3; + Contact contact = 4; + License license = 5; + string version = 6; + repeated NamedAny specification_extension = 7; + string summary = 8; +} + +message ItemsItem { + repeated SchemaOrReference schema_or_reference = 1; +} + +// License information for the exposed API. +message License { + string name = 1; + string url = 2; + repeated NamedAny specification_extension = 3; +} + +// The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation. +message Link { + string operation_ref = 1; + string operation_id = 2; + AnyOrExpression parameters = 3; + AnyOrExpression request_body = 4; + string description = 5; + Server server = 6; + repeated NamedAny specification_extension = 7; +} + +message LinkOrReference { + oneof oneof { + Link link = 1; + Reference reference = 2; + } +} + +message LinksOrReferences { + repeated NamedLinkOrReference additional_properties = 1; +} + +// Each Media Type Object provides schema and examples for the media type identified by its key. +message MediaType { + SchemaOrReference schema = 1; + Any example = 2; + ExamplesOrReferences examples = 3; + Encodings encoding = 4; + repeated NamedAny specification_extension = 5; +} + +message MediaTypes { + repeated NamedMediaType additional_properties = 1; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of CallbackOrReference as ordered (name,value) pairs. +message NamedCallbackOrReference { + // Map key + string name = 1; + // Mapped value + CallbackOrReference value = 2; +} + +// Automatically-generated message used to represent maps of Encoding as ordered (name,value) pairs. +message NamedEncoding { + // Map key + string name = 1; + // Mapped value + Encoding value = 2; +} + +// Automatically-generated message used to represent maps of ExampleOrReference as ordered (name,value) pairs. +message NamedExampleOrReference { + // Map key + string name = 1; + // Mapped value + ExampleOrReference value = 2; +} + +// Automatically-generated message used to represent maps of HeaderOrReference as ordered (name,value) pairs. +message NamedHeaderOrReference { + // Map key + string name = 1; + // Mapped value + HeaderOrReference value = 2; +} + +// Automatically-generated message used to represent maps of LinkOrReference as ordered (name,value) pairs. +message NamedLinkOrReference { + // Map key + string name = 1; + // Mapped value + LinkOrReference value = 2; +} + +// Automatically-generated message used to represent maps of MediaType as ordered (name,value) pairs. 
+message NamedMediaType { + // Map key + string name = 1; + // Mapped value + MediaType value = 2; +} + +// Automatically-generated message used to represent maps of ParameterOrReference as ordered (name,value) pairs. +message NamedParameterOrReference { + // Map key + string name = 1; + // Mapped value + ParameterOrReference value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. +message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of RequestBodyOrReference as ordered (name,value) pairs. +message NamedRequestBodyOrReference { + // Map key + string name = 1; + // Mapped value + RequestBodyOrReference value = 2; +} + +// Automatically-generated message used to represent maps of ResponseOrReference as ordered (name,value) pairs. +message NamedResponseOrReference { + // Map key + string name = 1; + // Mapped value + ResponseOrReference value = 2; +} + +// Automatically-generated message used to represent maps of SchemaOrReference as ordered (name,value) pairs. +message NamedSchemaOrReference { + // Map key + string name = 1; + // Mapped value + SchemaOrReference value = 2; +} + +// Automatically-generated message used to represent maps of SecuritySchemeOrReference as ordered (name,value) pairs. +message NamedSecuritySchemeOrReference { + // Map key + string name = 1; + // Mapped value + SecuritySchemeOrReference value = 2; +} + +// Automatically-generated message used to represent maps of ServerVariable as ordered (name,value) pairs. +message NamedServerVariable { + // Map key + string name = 1; + // Mapped value + ServerVariable value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +// Configuration details for a supported OAuth Flow +message OauthFlow { + string authorization_url = 1; + string token_url = 2; + string refresh_url = 3; + Strings scopes = 4; + repeated NamedAny specification_extension = 5; +} + +// Allows configuration of the supported OAuth Flows. +message OauthFlows { + OauthFlow implicit = 1; + OauthFlow password = 2; + OauthFlow client_credentials = 3; + OauthFlow authorization_code = 4; + repeated NamedAny specification_extension = 5; +} + +message Object { + repeated NamedAny additional_properties = 1; +} + +// Describes a single API operation on a path. +message Operation { + repeated string tags = 1; + string summary = 2; + string description = 3; + ExternalDocs external_docs = 4; + string operation_id = 5; + repeated ParameterOrReference parameters = 6; + RequestBodyOrReference request_body = 7; + Responses responses = 8; + CallbacksOrReferences callbacks = 9; + bool deprecated = 10; + repeated SecurityRequirement security = 11; + repeated Server servers = 12; + repeated NamedAny specification_extension = 13; +} + +// Describes a single operation parameter. A unique parameter is defined by a combination of a name and location. 
+message Parameter { + string name = 1; + string in = 2; + string description = 3; + bool required = 4; + bool deprecated = 5; + bool allow_empty_value = 6; + string style = 7; + bool explode = 8; + bool allow_reserved = 9; + SchemaOrReference schema = 10; + Any example = 11; + ExamplesOrReferences examples = 12; + MediaTypes content = 13; + repeated NamedAny specification_extension = 14; +} + +message ParameterOrReference { + oneof oneof { + Parameter parameter = 1; + Reference reference = 2; + } +} + +message ParametersOrReferences { + repeated NamedParameterOrReference additional_properties = 1; +} + +// Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available. +message PathItem { + string _ref = 1; + string summary = 2; + string description = 3; + Operation get = 4; + Operation put = 5; + Operation post = 6; + Operation delete = 7; + Operation options = 8; + Operation head = 9; + Operation patch = 10; + Operation trace = 11; + repeated Server servers = 12; + repeated ParameterOrReference parameters = 13; + repeated NamedAny specification_extension = 14; +} + +// Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints. +message Paths { + repeated NamedPathItem path = 1; + repeated NamedAny specification_extension = 2; +} + +message Properties { + repeated NamedSchemaOrReference additional_properties = 1; +} + +// A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification. +message Reference { + string _ref = 1; + string summary = 2; + string description = 3; +} + +message RequestBodiesOrReferences { + repeated NamedRequestBodyOrReference additional_properties = 1; +} + +// Describes a single request body. +message RequestBody { + string description = 1; + MediaTypes content = 2; + bool required = 3; + repeated NamedAny specification_extension = 4; +} + +message RequestBodyOrReference { + oneof oneof { + RequestBody request_body = 1; + Reference reference = 2; + } +} + +// Describes a single response from an API Operation, including design-time, static `links` to operations based on the response. +message Response { + string description = 1; + HeadersOrReferences headers = 2; + MediaTypes content = 3; + LinksOrReferences links = 4; + repeated NamedAny specification_extension = 5; +} + +message ResponseOrReference { + oneof oneof { + Response response = 1; + Reference reference = 2; + } +} + +// A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call. +message Responses { + ResponseOrReference default = 1; + repeated NamedResponseOrReference response_or_reference = 2; + repeated NamedAny specification_extension = 3; +} + +message ResponsesOrReferences { + repeated NamedResponseOrReference additional_properties = 1; +} + +// The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema. +message Schema { + bool nullable = 1; + Discriminator discriminator = 2; + bool read_only = 3; + bool write_only = 4; + Xml xml = 5; + ExternalDocs external_docs = 6; + Any example = 7; + bool deprecated = 8; + string title = 9; + double multiple_of = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + int64 max_properties = 21; + int64 min_properties = 22; + repeated string required = 23; + repeated Any enum = 24; + string type = 25; + repeated SchemaOrReference all_of = 26; + repeated SchemaOrReference one_of = 27; + repeated SchemaOrReference any_of = 28; + Schema not = 29; + ItemsItem items = 30; + Properties properties = 31; + AdditionalPropertiesItem additional_properties = 32; + DefaultType default = 33; + string description = 34; + string format = 35; + repeated NamedAny specification_extension = 36; +} + +message SchemaOrReference { + oneof oneof { + Schema schema = 1; + Reference reference = 2; + } +} + +message SchemasOrReferences { + repeated NamedSchemaOrReference additional_properties = 1; +} + +// Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request. +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +// Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE. 
+message SecurityScheme { + string type = 1; + string description = 2; + string name = 3; + string in = 4; + string scheme = 5; + string bearer_format = 6; + OauthFlows flows = 7; + string open_id_connect_url = 8; + repeated NamedAny specification_extension = 9; +} + +message SecuritySchemeOrReference { + oneof oneof { + SecurityScheme security_scheme = 1; + Reference reference = 2; + } +} + +message SecuritySchemesOrReferences { + repeated NamedSecuritySchemeOrReference additional_properties = 1; +} + +// An object representing a Server. +message Server { + string url = 1; + string description = 2; + ServerVariables variables = 3; + repeated NamedAny specification_extension = 4; +} + +// An object representing a Server Variable for server URL template substitution. +message ServerVariable { + repeated string enum = 1; + string default = 2; + string description = 3; + repeated NamedAny specification_extension = 4; +} + +message ServerVariables { + repeated NamedServerVariable additional_properties = 1; +} + +// Any property starting with x- is valid. +message SpecificationExtension { + oneof oneof { + double number = 1; + bool boolean = 2; + string string = 3; + } +} + +message StringArray { + repeated string value = 1; +} + +message Strings { + repeated NamedString additional_properties = 1; +} + +// Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances. +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny specification_extension = 4; +} + +// A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior. +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny specification_extension = 6; +} + diff --git a/vendor/github.com/google/gnostic-models/openapiv3/README.md b/vendor/github.com/google/gnostic-models/openapiv3/README.md new file mode 100644 index 000000000..5ee12d92e --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/README.md @@ -0,0 +1,21 @@ +# OpenAPI v3 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model and related code for +supporting OpenAPI v3. + +Gnostic applications and plugins can use OpenAPIv3.proto to generate Protocol +Buffer support code for their preferred languages. + +OpenAPIv3.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into +the Protocol Buffer-based datastructures generated from OpenAPIv3.proto. + +OpenAPIv3.proto and OpenAPIv3.go are generated by the Gnostic compiler +generator, and OpenAPIv3.pb.go is generated by protoc, the Protocol Buffer +compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. + +openapi-3.1.json is a JSON schema for OpenAPI 3.1 that is automatically +generated from the OpenAPI 3.1 specification. It is not an official JSON Schema +for OpenAPI. + +The schema-generator directory contains support code which generates +openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). 
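For readers unfamiliar with the newly vendored package, the README above and the document.go file added next describe its public surface: ParseDocument reads a YAML or JSON OpenAPI v3 description into the protobuf-backed Document type generated from OpenAPIv3.proto. The sketch below shows one way a consumer might call it; the file name "openapi.yaml" and the surrounding program are illustrative only and are not part of this change.

package main

import (
	"fmt"
	"os"

	openapi_v3 "github.com/google/gnostic-models/openapiv3"
)

func main() {
	// Read an OpenAPI v3 description (YAML or JSON) from disk.
	// "openapi.yaml" is a placeholder path used only for illustration.
	b, err := os.ReadFile("openapi.yaml")
	if err != nil {
		panic(err)
	}

	// ParseDocument (see document.go below) returns the *Document message
	// defined by OpenAPIv3.proto.
	doc, err := openapi_v3.ParseDocument(b)
	if err != nil {
		panic(err)
	}

	// Document.info and Info.title correspond to the `info` and `title`
	// fields of the OpenAPI model above; generated getters are nil-safe.
	fmt.Println(doc.GetInfo().GetTitle())
}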
diff --git a/vendor/github.com/google/gnostic-models/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go new file mode 100644 index 000000000..1cee46773 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go @@ -0,0 +1,42 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapi_v3 + +import ( + "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/compiler" +) + +// ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation. +func ParseDocument(b []byte) (*Document, error) { + info, err := compiler.ReadInfoFromBytes("", b) + if err != nil { + return nil, err + } + root := info.Content[0] + return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil)) +} + +// YAMLValue produces a serialized YAML representation of the document. +func (d *Document) YAMLValue(comment string) ([]byte, error) { + rawInfo := d.ToRawInfo() + rawInfo = &yaml.Node{ + Kind: yaml.DocumentNode, + Content: []*yaml.Node{rawInfo}, + HeadComment: comment, + } + return yaml.Marshal(rawInfo) +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 86d0903b8..0f5b8a48c 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -5,7 +5,7 @@ // Package cmp determines equality of values. // // This package is intended to be a more powerful and safer alternative to -// reflect.DeepEqual for comparing whether two values are semantically equal. +// [reflect.DeepEqual] for comparing whether two values are semantically equal. // It is intended to only be used in tests, as performance is not a goal and // it may panic if it cannot compare the values. Its propensity towards // panicking means that its unsuitable for production environments where a @@ -13,21 +13,22 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method +// to determine equality. This allows package authors to determine +// the equality operation for the types that they define. 
// -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an [Ignore] option +// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) +// or explicitly compared using the [Exporter] option. package cmp import ( @@ -36,50 +37,52 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that +// remain after applying all path filters, value filters, and type filters. +// If at least one [Ignore] exists in S, then the comparison is ignored. +// If the number of [Transformer] and [Comparer] options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single [Transformer], then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single [Comparer], then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. 
+// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. -// If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option -// explicitly permits comparing the unexported field. +// If a struct contains unexported fields, Equal panics unless an [Ignore] option +// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field +// or the [Exporter] option explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. // Empty non-nil slices and nil slices are not equal; to equate empty slices, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Maps are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored map entries report equal. // Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// To use custom comparisons for map keys, consider using +// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. // Empty non-nil maps and nil maps are not equal; to equate empty maps, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively @@ -143,7 +146,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type. var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -319,7 +322,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -343,8 +345,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -372,19 +372,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). 
- if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy @@ -654,7 +641,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export.go similarity index 95% rename from vendor/github.com/google/go-cmp/cmp/export_unsafe.go rename to vendor/github.com/google/go-cmp/cmp/export.go index 21eb54858..29f82fe6b 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !purego - package cmp import ( @@ -11,8 +9,6 @@ import ( "unsafe" ) -const supportExporters = true - // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go deleted file mode 100644 index 5ff0b4218..000000000 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build purego - -package cmp - -import "reflect" - -const supportExporters = false - -func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { - panic("no support for forcibly accessing unexported fields") -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 1daaaacc5..36062a604 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 4b91dbcac..a3b97a1ad 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16c..a248e5436 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. + // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. 
If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed @@ -389,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f0529..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index b6c12cefb..7b498bb2c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -9,6 +9,8 @@ import ( "strconv" ) +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. func TypeString(t reflect.Type, qualified bool) string { @@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte // of the same name and within the same package, // but declared within the namespace of different functions. + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + // Named type. if t.Name() != "" { if qualified && t.PkgPath() != "" { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go similarity index 97% rename from vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go rename to vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go index a605953d4..e5dfff69a 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !purego - package value import ( diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go deleted file mode 100644 index 44f4a5afd..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build purego - -package value - -import "reflect" - -// Pointer is an opaque typed pointer and is guaranteed to be comparable. -type Pointer struct { - p uintptr - t reflect.Type -} - -// PointerOf returns a Pointer from v, which must be a -// reflect.Ptr, reflect.Slice, or reflect.Map. -func PointerOf(v reflect.Value) Pointer { - // NOTE: Storing a pointer as an uintptr is technically incorrect as it - // assumes that the GC implementation does not use a moving collector. - return Pointer{v.Pointer(), v.Type()} -} - -// IsNil reports whether the pointer is nil. -func (p Pointer) IsNil() bool { - return p.p == 0 -} - -// Uintptr returns the pointer as a uintptr. -func (p Pointer) Uintptr() uintptr { - return p.p -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a2997..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. -func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb53..754496f3b 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -13,15 +13,15 @@ import ( "github.com/google/go-cmp/cmp/internal/function" ) -// Option configures for specific behavior of Equal and Diff. In particular, -// the fundamental Option functions (Ignore, Transformer, and Comparer), +// Option configures for specific behavior of [Equal] and [Diff]. 
In particular, +// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), // configure how equality is determined. // -// The fundamental options may be composed with filters (FilterPath and -// FilterValues) to control the scope over which they are applied. +// The fundamental options may be composed with filters ([FilterPath] and +// [FilterValues]) to control the scope over which they are applied. // -// The cmp/cmpopts package provides helper functions for creating options that -// may be used with Equal and Diff. +// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions +// for creating options that may be used with [Equal] and [Diff]. type Option interface { // filter applies all filters and returns the option that remains. // Each option may only read s.curPath and call s.callTTBFunc. @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -54,9 +56,9 @@ type core struct{} func (core) isCore() {} -// Options is a list of Option values that also satisfies the Option interface. +// Options is a list of [Option] values that also satisfies the [Option] interface. // Helper comparison packages may return an Options value when packing multiple -// Option values into a single Option. When this package processes an Options, +// [Option] values into a single [Option]. When this package processes an Options, // it will be implicitly expanded into a flat list. // // Applying a filter on an Options is equivalent to applying that same filter @@ -103,16 +105,16 @@ func (opts Options) String() string { return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) } -// FilterPath returns a new Option where opt is only evaluated if filter f -// returns true for the current Path in the value tree. +// FilterPath returns a new [Option] where opt is only evaluated if filter f +// returns true for the current [Path] in the value tree. // // This filter is called even if a slice element or map entry is missing and // provides an opportunity to ignore such cases. The filter function must be // symmetric such that the filter result is identical regardless of whether the // missing value is from x or y. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterPath(f func(Path) bool, opt Option) Option { if f == nil { panic("invalid path filter function") @@ -140,7 +142,7 @@ func (f pathFilter) String() string { return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) } -// FilterValues returns a new Option where opt is only evaluated if filter f, +// FilterValues returns a new [Option] where opt is only evaluated if filter f, // which is a function of the form "func(T, T) bool", returns true for the // current pair of values being compared. 
If either value is invalid or // the type of the values is not assignable to T, then this filter implicitly @@ -152,8 +154,8 @@ func (f pathFilter) String() string { // If T is an interface, it is possible that f is called with two values with // different concrete types that both implement T. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterValues(f interface{}, opt Option) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { @@ -190,9 +192,9 @@ func (f valuesFilter) String() string { return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) } -// Ignore is an Option that causes all comparisons to be ignored. -// This value is intended to be combined with FilterPath or FilterValues. -// It is an error to pass an unfiltered Ignore option to Equal. +// Ignore is an [Option] that causes all comparisons to be ignored. +// This value is intended to be combined with [FilterPath] or [FilterValues]. +// It is an error to pass an unfiltered Ignore option to [Equal]. func Ignore() Option { return ignore{} } type ignore struct{ core } @@ -232,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) { name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" + } else if t.Comparable() { + help = "consider using cmpopts.EquateComparable to compare comparable Go types" } } else { // Unnamed type with unexported fields. Derive PkgPath from field. @@ -252,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*` var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) -// Transformer returns an Option that applies a transformation function that +// Transformer returns an [Option] that applies a transformation function that // converts values of a certain type into that of another. // // The transformer f must be a function "func(T) R" that converts values of @@ -263,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) // same transform to the output of itself (e.g., in the case where the // input and output types are the same), an implicit filter is added such that // a transformer is applicable only if that exact transformer is not already -// in the tail of the Path since the last non-Transform step. +// in the tail of the [Path] since the last non-[Transform] step. // For situations where the implicit filter is still insufficient, -// consider using cmpopts.AcyclicTransformer, which adds a filter -// to prevent the transformer from being recursively applied upon itself. +// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], +// which adds a filter to prevent the transformer from +// being recursively applied upon itself. // -// The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep (and eventually shown in the Diff output). +// The name is a user provided label that is used as the [Transform.Name] in the +// transformation [PathStep] (and eventually shown in the [Diff] output). // The name must be a valid identifier or qualified identifier in Go syntax. // If empty, an arbitrary name is used. 
func Transformer(name string, f interface{}) Option { @@ -327,7 +332,7 @@ func (tr transformer) String() string { return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) } -// Comparer returns an Option that determines whether two values are equal +// Comparer returns an [Option] that determines whether two values are equal // to each other. // // The comparer f must be a function "func(T, T) bool" and is implicitly @@ -336,9 +341,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -375,35 +380,32 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// Exporter returns an Option that specifies whether Equal is allowed to +// Exporter returns an [Option] that specifies whether [Equal] is allowed to // introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of Equal +// implementation of some external package may cause the result of [Equal] // to unexpectedly change. However, it may be valid to use this option on types // defined in an internal package where the semantic meaning of an unexported // field is in the control of the user. // -// In many cases, a custom Comparer should be used instead that defines +// In many cases, a custom [Comparer] should be used instead that defines // equality as a function of the public API of a type rather than the underlying // unexported implementation. // -// For example, the reflect.Type documentation defines equality to be determined +// For example, the [reflect.Type] documentation defines equality to be determined // by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *regexp.Regexp types are interested +// comparison) and most attempts to compare *[regexp.Regexp] types are interested // in only checking that the regular expression strings are equal. -// Both of these are accomplished using Comparers: +// Both of these are accomplished using [Comparer] options: // // Comparer(func(x, y reflect.Type) bool { return x == y }) // Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) // -// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore -// all unexported fields on specified struct types. +// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] +// option can be used to ignore all unexported fields on specified struct types. 
func Exporter(f func(reflect.Type) bool) Option { - if !supportExporters { - panic("Exporter is not supported on purego builds") - } return exporter(f) } @@ -413,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO panic("not implemented") } -// AllowUnexported returns an Options that allows Equal to forcibly introspect +// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect // unexported fields of the specified struct types. // -// See Exporter for the proper use of this option. +// See [Exporter] for the proper use of this option. func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see [Reporter]). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags @@ -443,7 +445,7 @@ func (r Result) Equal() bool { } // ByIgnore reports whether the node is equal because it was ignored. -// This never reports true if Equal reports false. +// This never reports true if [Result.Equal] reports false. func (r Result) ByIgnore() bool { return r.flags&reportByIgnore != 0 } @@ -453,7 +455,7 @@ func (r Result) ByMethod() bool { return r.flags&reportByMethod != 0 } -// ByFunc reports whether a Comparer function determined equality. +// ByFunc reports whether a [Comparer] function determined equality. func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } @@ -476,7 +478,7 @@ const ( reportByCycle ) -// Reporter is an Option that can be passed to Equal. When Equal traverses +// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses // the value trees, it calls PushStep as it descends into each node in the // tree and PopStep as it ascend out of the node. The leaves of the tree are // either compared (determined to be equal or not equal) or ignored and reported diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index f01eff318..c3c145642 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -14,9 +14,9 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// Path is a list of PathSteps describing the sequence of operations to get +// Path is a list of [PathStep] describing the sequence of operations to get // from some root type to the current position in the value tree. -// The first Path element is always an operation-less PathStep that exists +// The first Path element is always an operation-less [PathStep] that exists // simply to identify the initial type. // // When traversing structs with embedded structs, the embedded struct will @@ -29,8 +29,13 @@ type Path []PathStep // a value's tree structure. Users of this package never need to implement // these types as values of this type will be returned by this package. // -// Implementations of this interface are -// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +// Implementations of this interface: +// - [StructField] +// - [SliceIndex] +// - [MapIndex] +// - [Indirect] +// - [TypeAssertion] +// - [Transform] type PathStep interface { String() string @@ -41,13 +46,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. 
// // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. + // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -70,8 +75,9 @@ func (pa *Path) pop() { *pa = (*pa)[:len(*pa)-1] } -// Last returns the last PathStep in the Path. -// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +// Last returns the last [PathStep] in the Path. +// If the path is empty, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Last() PathStep { return pa.Index(-1) } @@ -79,7 +85,8 @@ func (pa Path) Last() PathStep { // Index returns the ith step in the Path and supports negative indexing. // A negative index starts counting from the tail of the Path such that -1 // refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +// If index is invalid, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Index(i int) PathStep { if i < 0 { i = len(pa) + i @@ -94,6 +101,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +116,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,14 +168,15 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } return fmt.Sprintf("{%s}", s) } -// StructField represents a struct field access on a field called Name. +// StructField is a [PathStep] that represents a struct field access +// on a field called [StructField.Name]. type StructField struct{ *structField } type structField struct { pathStep @@ -178,7 +188,7 @@ type structField struct { unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } @@ -202,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } func (sf StructField) Name() string { return sf.name } // Index is the index of the field in the parent struct type. -// See reflect.Type.Field. +// See [reflect.Type.Field]. 
func (sf StructField) Index() int { return sf.idx } -// SliceIndex is an index operation on a slice or array at some index Key. +// SliceIndex is a [PathStep] that represents an index operation on +// a slice or array at some index [SliceIndex.Key]. type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep @@ -245,12 +256,12 @@ func (si SliceIndex) Key() int { // all of the indexes to be shifted. If an index is -1, then that // indicates that the element does not exist in the associated slice. // -// Key is guaranteed to return -1 if and only if the indexes returned -// by SplitKeys are not the same. SplitKeys will never return -1 for +// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes +// returned by SplitKeys are not the same. SplitKeys will never return -1 for // both indexes. func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } -// MapIndex is an index operation on a map at some index Key. +// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. type MapIndex struct{ *mapIndex } type mapIndex struct { pathStep @@ -264,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", // Key is the value of the map key. func (mi MapIndex) Key() reflect.Value { return mi.key } -// Indirect represents pointer indirection on the parent type. +// Indirect is a [PathStep] that represents pointer indirection on the parent type. type Indirect struct{ *indirect } type indirect struct { pathStep @@ -274,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ } func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } func (in Indirect) String() string { return "*" } -// TypeAssertion represents a type assertion on an interface. +// TypeAssertion is a [PathStep] that represents a type assertion on an interface. type TypeAssertion struct{ *typeAssertion } type typeAssertion struct { pathStep @@ -282,9 +293,10 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } -// Transform is a transformation from the parent type to the current type. +// Transform is a [PathStep] that represents a transformation +// from the parent type to the current type. type Transform struct{ *transform } type transform struct { pathStep @@ -295,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ } func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } -// Name is the name of the Transformer. +// Name is the name of the [Transformer]. func (tf Transform) Name() string { return tf.trans.name } // Func is the function pointer to the transformer function. func (tf Transform) Func() reflect.Value { return tf.trans.fnc } -// Option returns the originally constructed Transformer option. +// Option returns the originally constructed [Transformer] option. // The == operator can be used to detect the exact option used. 
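Editorial aside (illustrative, not part of the vendored file): the Reporter, Result, and PathStep pieces documented in the hunks above fit together roughly as follows; a reporter pushes and pops path steps as Equal walks the trees and records the leaf values of every unequal node. The name diffReporter is made up for this sketch.

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter collects one entry per unequal leaf, using Path to locate it.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%v: %v != %v", r.path, vx, vy))
	}
}

func main() {
	type T struct{ A, B int }
	var r diffReporter
	cmp.Equal(T{1, 2}, T{1, 3}, cmp.Reporter(&r))
	fmt.Println(r.diffs) // one entry for the differing B field
}
```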
func (tf Transform) Option() Option { return tf.trans } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 104bb3053..2050bf6b4 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -7,8 +7,6 @@ package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) // numContextRecords is the number of surrounding equal records to print. @@ -116,7 +114,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out } // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. @@ -245,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 33f03577f..e39f42284 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -192,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, break } sf := t.Field(i) - if supportExporters && !isExported(sf.Name) { + if !isExported(sf.Name) { vv = retrieveUnexportedField(v, sf, true) } s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) @@ -205,12 +212,13 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data. 
- if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) - return opts.WithTypeMode(emitType).FormatType(t, out) + skipType = true + return opts.FormatType(t, out) } } @@ -281,7 +289,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -292,7 +305,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. - skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 2ad3bc85b..23e444f62 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. - const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." 
+ // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... // 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { @@ -563,10 +564,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ff..388fcf571 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml index f8684d99f..061d72ae0 100644 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -1,13 +1,10 @@ language: go go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - 1.11.x + - 1.12.x + - 1.13.x + - master script: - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md index 51cf5cd1a..97c1b34fd 100644 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -1,7 +1,7 @@ # How to contribute # We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. +just a few small guidelines you need to follow. ## Contributor License Agreement ## diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 386c2a457..b503aae7d 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. See more examples in ```example_test.go```. +You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. +go-fuzz provides the user a byte-slice, which should be converted to different inputs +for the tested function. This library can help convert the byte slice. Consider for +example a fuzz test for a the function `mypackage.MyFunc` that takes an int arguments: +```go +// +build gofuzz +package mypackage + +import fuzz "github.com/google/gofuzz" + +func Fuzz(data []byte) int { + var i int + fuzz.NewFromGoFuzz(data).Fuzz(&i) + MyFunc(i) + return 0 +} +``` + Happy testing! diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go new file mode 100644 index 000000000..5bb365949 --- /dev/null +++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. +package bytesource + +import ( + "bytes" + "encoding/binary" + "io" + "math/rand" +) + +// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are +// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a +// fallback pseudo random source is created in case more random numbers are required. +// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. +type ByteSource struct { + *bytes.Reader + fallback rand.Source +} + +// New returns a new ByteSource from a given slice of bytes. +func New(input []byte) *ByteSource { + s := &ByteSource{ + Reader: bytes.NewReader(input), + fallback: rand.NewSource(0), + } + if len(input) > 0 { + s.fallback = rand.NewSource(int64(s.consumeUint64())) + } + return s +} + +func (s *ByteSource) Uint64() uint64 { + // Return from input if it was not exhausted. 
+ if s.Len() > 0 { + return s.consumeUint64() + } + + // Input was exhausted, return random number from fallback (in this case fallback should not be + // nil). Try first having a Uint64 output (Should work in current rand implementation), + // otherwise return a conversion of Int63. + if s64, ok := s.fallback.(rand.Source64); ok { + return s64.Uint64() + } + return uint64(s.fallback.Int63()) +} + +func (s *ByteSource) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +func (s *ByteSource) Seed(seed int64) { + s.fallback = rand.NewSource(seed) + s.Reader = bytes.NewReader(nil) +} + +// consumeUint64 reads 8 bytes from the input and convert them to a uint64. It assumes that the the +// bytes reader is not empty. +func (s *ByteSource) consumeUint64() uint64 { + var bytes [8]byte + _, err := s.Read(bytes[:]) + if err != nil && err != io.EOF { + panic("failed reading source") // Should not happen. + } + return binary.BigEndian.Uint64(bytes[:]) +} diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index da0a5f938..761520a8c 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -22,6 +22,9 @@ import ( "reflect" "regexp" "time" + + "github.com/google/gofuzz/bytesource" + "strings" ) // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. @@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer { return f } +// NewFromGoFuzz is a helper function that enables using gofuzz (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Fuzzer should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: +// +// // +build gofuzz +// package mypacakge +// import fuzz "github.com/google/gofuzz" +// func Fuzz(data []byte) int { +// var i int +// fuzz.NewFromGoFuzz(data).Fuzz(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Fuzzer { + return New().RandSource(bytesource.New(data)) +} + // Funcs adds each entry in fuzzFuncs as a custom fuzzing function. // // Each entry in fuzzFuncs must be a function taking two parameters. 
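Editorial aside (not part of the vendored gofuzz sources): the ByteSource type introduced above is a rand.Source64, so it can be plugged straight into math/rand; that is essentially what NewFromGoFuzz does under the hood. A minimal sketch:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/gofuzz/bytesource"
)

func main() {
	// Early values are carved directly out of the input bytes; once the input
	// is exhausted, the seeded fallback source takes over, so the stream is
	// deterministic for a given input slice.
	src := bytesource.New([]byte("bytes handed over by go-fuzz"))
	r := rand.New(src)
	fmt.Println(r.Uint64(), r.Intn(100), r.Float64())
}
```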
@@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int { } func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance + return f.r.Float64() >= f.nilChance } // MaxDepth sets the maximum number of recursive fuzz calls that will be made @@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { fn(v, fc.fuzzer.r) return } + switch v.Kind() { case reflect.Map: if fc.fuzzer.genShouldFill() { @@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ v.SetFloat(r.Float64()) }, reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) }, reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex(r.Float64(), r.Float64())) }, reflect.String: func(v reflect.Value, r *rand.Rand) { v.SetString(randString(r)) @@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ // randBool returns true or false randomly. func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 } -type charRange struct { - first, last rune +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune } +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. +// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + // choose returns a random unicode character from the given range, using the // given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) +func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (cr.Last < cr.First), this will panic. +func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } } -var unicodeRanges = []charRange{ +// check is a function that used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("The last encoding must be greater than the first one.") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected form ur(UnicodeRange). +func (ur UnicodeRange) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when user do not set +// CustomStringFuzzFunc() but wants fuzz string. +var defaultUnicodeRanges = UnicodeRanges{ {' ', '~'}, // ASCII characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from one of the ranges of ur(UnicodeRanges). 
+// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. +func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty.") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + // randString makes a random string up to 20 characters long. The returned string // may include a variety of (valid) UTF-8 encodings. func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) + return defaultUnicodeRanges.randString(r) } // randUint64 makes random 64 bit numbers. diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 000000000..7ec5ac7ea --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. 
(Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..a502fdc51 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..3e9a61889 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,21 @@ +# uuid +The uuid package generates and inspects UUIDs based on +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +```sh +go get github.com/google/uuid +``` + +###### Documentation +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. 
+func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..dc60082d3 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..14bd34072 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
+func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..b2a0bc871 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..2e02ec06c --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..c35112927 --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,134 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. 
+type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. +func (uuid UUID) Time() Time { + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. 
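Editorial aside (illustrative only): the Time and ClockSequence accessors above are only meaningful for time-based UUIDs. Assuming the package's NewUUID version-1 constructor (its file, version1.go, begins at the end of this section), the embedded timestamp can be recovered like this:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // time-based, version 1
	if err != nil {
		panic(err)
	}
	// Convert the 100ns-since-1582 timestamp back to a time.Time.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC(), id.ClockSequence())
}
```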
+func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..5232b4867 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,365 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. 
+) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
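MustParse is aimed at initializing package-level UUID values, and Validate checks the same formats as Parse without returning the parsed value. A small sketch, with an arbitrary example literal:

    package main

    import "github.com/google/uuid"

    // Panics at init time if the literal is malformed.
    var nsConfig = uuid.MustParse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")

    func main() {
        ok := uuid.Validate(nsConfig.String()) == nil // true: canonical form is accepted
        _ = ok
    }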
+func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..463109629 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..7697802e4 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
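For tests it can be useful to pin the randomness source; a sketch using NewRandomFromReader with a fixed reader (the zero-filled input is purely illustrative):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/google/uuid"
    )

    func main() {
        r := bytes.NewReader(make([]byte, 16)) // deterministic "randomness" for the 16 UUID bytes
        u, err := uuid.NewRandomFromReader(r)
        fmt.Println(u, err)                    // reproducible output, err == nil
        fmt.Println(u.Version(), u.Variant())  // VERSION_4, RFC4122 (bits forced by the constructor)
    }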
+func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 000000000..339a959a7 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. +func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 000000000..3167b643d --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. +func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index d1cefa871..8b76f1fbf 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -27,7 +27,7 @@ It is ready for production use. [It is used in several projects by Docker, Googl ### Latest release -[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4). 
+[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6). ### Important note diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 706b22069..44f70a89d 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -9,6 +9,7 @@ package mergo import ( + "fmt" "reflect" ) @@ -127,6 +128,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dstSlice = srcSlice } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) } dst.SetMapIndex(key, dstSlice) @@ -150,6 +154,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dst.Set(src) } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } dst.Set(reflect.AppendSlice(dst, src)) } case reflect.Ptr: diff --git a/vendor/github.com/josharian/intern/README.md b/vendor/github.com/josharian/intern/README.md new file mode 100644 index 000000000..ffc44b219 --- /dev/null +++ b/vendor/github.com/josharian/intern/README.md @@ -0,0 +1,5 @@ +Docs: https://godoc.org/github.com/josharian/intern + +See also [Go issue 5160](https://golang.org/issue/5160). + +License: MIT diff --git a/vendor/github.com/josharian/intern/intern.go b/vendor/github.com/josharian/intern/intern.go new file mode 100644 index 000000000..7acb1fe90 --- /dev/null +++ b/vendor/github.com/josharian/intern/intern.go @@ -0,0 +1,44 @@ +// Package intern interns strings. +// Interning is best effort only. +// Interned strings may be removed automatically +// at any time without notification. +// All functions may be called concurrently +// with themselves and each other. +package intern + +import "sync" + +var ( + pool sync.Pool = sync.Pool{ + New: func() interface{} { + return make(map[string]string) + }, + } +) + +// String returns s, interned. +func String(s string) string { + m := pool.Get().(map[string]string) + c, ok := m[s] + if ok { + pool.Put(m) + return c + } + m[s] = s + pool.Put(m) + return s +} + +// Bytes returns b converted to a string, interned. 
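The intern package added here is what the easyjson lexer's StringIntern uses to de-duplicate repeated JSON keys; a minimal sketch of direct use, with an arbitrary example string:

    package main

    import (
        "fmt"

        "github.com/josharian/intern"
    )

    func main() {
        a := intern.String("content-type")
        b := intern.Bytes([]byte("content-type"))
        // Interning is best effort: equal strings usually end up sharing one
        // backing allocation, but the package never guarantees it.
        fmt.Println(a == b) // true (value equality holds either way)
    }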
+func Bytes(b []byte) string { + m := pool.Get().(map[string]string) + c, ok := m[string(b)] + if ok { + pool.Put(m) + return c + } + s := string(b) + m[s] = s + pool.Put(m) + return s +} diff --git a/vendor/github.com/josharian/intern/license.md b/vendor/github.com/josharian/intern/license.md new file mode 100644 index 000000000..353d3055f --- /dev/null +++ b/vendor/github.com/josharian/intern/license.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Josh Bleecher Snyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 000000000..fbff658f7 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 000000000..598a54af9 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,278 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "net" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. 
+ MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. +var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size >= config.PooledSize { + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) < s { + b.ensureSpaceSlow(s) + } +} + +func (b *Buffer) ensureSpaceSlow(s int) { + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + b.EnsureSpace(1) + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) // fast path + } else { + b.appendBytesSlow(data) + } +} + +func (b *Buffer) appendBytesSlow(data []byte) { + for len(data) > 0 { + b.EnsureSpace(1) + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendString appends a string to buffer. +func (b *Buffer) AppendString(data string) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) // fast path + } else { + b.appendStringSlow(data) + } +} + +func (b *Buffer) appendStringSlow(data string) { + for len(data) > 0 { + b.EnsureSpace(1) + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. 
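Buffer accumulates output in pooled chunks; a small sketch of the append/build cycle (the JSON fragment is illustrative):

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/buffer"
    )

    func main() {
        var b buffer.Buffer
        b.AppendString(`{"msg":`)
        b.AppendBytes([]byte(`"hello"`))
        b.AppendByte('}')
        // BuildBytes returns the accumulated bytes as one slice, copying only
        // if more than one chunk was used, and resets the buffer.
        out := b.BuildBytes()
        fmt.Println(string(out), b.Size()) // {"msg":"hello"} 0
    }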
+func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + bufs := net.Buffers(b.bufs) + if len(b.Buf) > 0 { + bufs = append(bufs, b.Buf) + } + n, err := bufs.WriteTo(w) + + for _, buf := range b.bufs { + putBuf(buf) + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return int(n), err +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. +func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { + if len(b.bufs) == 0 { + ret := b.Buf + b.toPool = nil + b.Buf = nil + return ret + } + + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enough, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? + if r.offset+x == len(buf) { + // On to the next buffer. + r.offset = 0 + r.bufs = r.bufs[1:] + + // We can release this buffer. + putBuf(buf) + } else { + r.offset += x + } + + if n == len(p) { + break + } + } + // No buffers left or nothing read? + if len(r.bufs) == 0 { + err = io.EOF + } + return +} + +func (r *readCloser) Close() error { + // Release all remaining buffers. + for _, buf := range r.bufs { + putBuf(buf) + } + // In case Close gets called multiple times. + r.bufs = nil + + return nil +} + +// ReadCloser creates an io.ReadCloser with all the contents of the buffer. +func (b *Buffer) ReadCloser() io.ReadCloser { + ret := &readCloser{0, append(b.bufs, b.Buf)} + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go new file mode 100644 index 000000000..ff7b27c5b --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -0,0 +1,24 @@ +// This file will only be included to the build if neither +// easyjson_nounsafe nor appengine build tag is set. See README notes +// for more details. + +//+build !easyjson_nounsafe +//+build !appengine + +package jlexer + +import ( + "reflect" + "unsafe" +) + +// bytesToStr creates a string pointing at the slice to avoid copying. +// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. 
+func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go new file mode 100644 index 000000000..864d1be67 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go @@ -0,0 +1,13 @@ +// This file is included to the build if any of the buildtags below +// are defined. Refer to README notes for more details. + +//+build easyjson_nounsafe appengine + +package jlexer + +// bytesToStr creates a string normally from []byte +// +// Note that this method is roughly 1.5x slower than using the 'unsafe' method. +func bytesToStr(data []byte) string { + return string(data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 000000000..e90ec40d0 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. +type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 000000000..b5f5e2613 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,1244 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "github.com/josharian/intern" +) + +// tokenKind determines type of a token. +type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. +type token struct { + kind tokenKind // Type of a token. + + boolValue bool // Value if a boolean literal token. + byteValueCloned bool // true if byteValue was allocated and does not refer to original json body + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. + wantSep byte // A comma or a colon character, which need to occur before a token. + + UseMultipleErrors bool // If we want to use multiple errors. + fatalError error // Fatal error occurred during lexing. It is usually a syntax error. 
+ multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. +} + +// FetchToken scans the input for the next token. +func (r *Lexer) FetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. + for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.fatalError = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. +func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. +func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// fetchNumber scans a number literal token. +func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' 
&& !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. +func findStringLen(data []byte) (isValid bool, length int) { + for { + idx := bytes.IndexByte(data, '"') + if idx == -1 { + return false, len(data) + } + if idx == 0 || (idx > 0 && data[idx-1] != '\\') { + return true, length + idx + } + + // count \\\\\\\ sequences. even number of slashes means quote is not really escaped + cnt := 1 + for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { + cnt++ + } + if cnt%2 == 0 { + return true, length + idx + } + + length += idx + 1 + data = data[idx+1:] + } +} + +// unescapeStringToken performs unescaping of string token. +// if no escaping is needed, original string is returned, otherwise - a new one allocated +func (r *Lexer) unescapeStringToken() (err error) { + data := r.token.byteValue + var unescapedData []byte + + for { + i := bytes.IndexByte(data, '\\') + if i == -1 { + break + } + + escapedRune, escapedBytes, err := decodeEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return err + } + + if unescapedData == nil { + unescapedData = make([]byte, 0, len(r.token.byteValue)) + } + + var d [4]byte + s := utf8.EncodeRune(d[:], escapedRune) + unescapedData = append(unescapedData, data[:i]...) + unescapedData = append(unescapedData, d[:s]...) + + data = data[i+escapedBytes:] + } + + if unescapedData != nil { + r.token.byteValue = append(unescapedData, data...) + r.token.byteValueCloned = true + } + return +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var val rune + for i := 2; i < len(s) && i < 6; i++ { + var v byte + c := s[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return -1 + } + + val <<= 4 + val |= rune(v) + } + return val +} + +// decodeEscape processes a single escape sequence and returns number of bytes processed. +func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { + if len(data) < 2 { + return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") + } + + c := data[1] + switch c { + case '"', '/', '\\': + return rune(c), 2, nil + case 'b': + return '\b', 2, nil + case 'f': + return '\f', 2, nil + case 'n': + return '\n', 2, nil + case 'r': + return '\r', 2, nil + case 't': + return '\t', 2, nil + case 'u': + rr := getu4(data) + if rr < 0 { + return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") + } + + read := 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(data[read:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + read += 6 + rr = dec + } else { + rr = unicode.ReplacementChar + } + } + return rr, read, nil + } + + return 0, 0, errors.New("incorrectly escaped bytes") +} + +// fetchString scans a string literal token. 
+func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + isValid, length := findStringLen(data) + if !isValid { + r.pos += length + r.errParse("unterminated string literal") + return + } + r.token.byteValue = data[:length] + r.pos += length + 1 // skip closing '"' as well +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.fatalError != nil { + return + } + + r.FetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.byteValueCloned = false + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. +func (r *Lexer) Ok() bool { + return r.fatalError == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.fatalError == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.fatalError != nil { + return + } + if r.UseMultipleErrors { + r.pos = r.start + r.consume() + r.SkipRecursive() + switch expected { + case "[": + r.token.delimValue = ']' + r.token.kind = tokenDelim + case "{": + r.token.delimValue = '}' + r.token.kind = tokenDelim + } + r.addNonfatalError(&LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + }) + return + } + + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } +} + +func (r *Lexer) GetPos() int { + return r.pos +} + +// Delim consumes a token and verifies that it is the given delimiter. +func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() || r.token.delimValue != c { + r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. + r.errInvalidToken(string([]byte{c})) + } else { + r.consume() + } +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. +func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. +func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. 
+func (r *Lexer) SkipRecursive() { + r.scanToken() + var start, end byte + startPos := r.start + + switch r.token.delimValue { + case '{': + start, end = '{', '}' + case '[': + start, end = '[', ']' + default: + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } + return + } + case c == '\\' && inQuotes: + wasEscape = !wasEscape + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "EOF reached while skipping array/object or token", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// IsStart returns whether the lexer is positioned at the start +// of an input string. +func (r *Lexer) IsStart() bool { + return r.pos == 0 +} + +// Consumed reads all remaining bytes from the input, publishing an error if +// there is anything but whitespace remaining. +func (r *Lexer) Consumed() { + if r.pos > len(r.Data) || !r.Ok() { + return + } + + for _, c := range r.Data[r.pos:] { + if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + r.AddError(&LexerError{ + Reason: "invalid character '" + string(c) + "' after top-level value", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + }) + return + } + + r.pos++ + r.start++ + } +} + +func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "", nil + } + if !skipUnescape { + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "", nil + } + } + + bytes := r.token.byteValue + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret, bytes +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + ret, _ := r.unsafeString(false) + return ret +} + +// UnsafeBytes returns the byte slice if the token is a string literal. +func (r *Lexer) UnsafeBytes() []byte { + _, ret := r.unsafeString(false) + return ret +} + +// UnsafeFieldName returns current member name string token +func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { + ret, _ := r.unsafeString(skipUnescape) + return ret +} + +// String reads a string literal. 
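The lexer is designed to be driven by generated decoders; the following hand-written sketch walks the same Delim/WantColon/WantComma pattern (the field names and values are just an example):

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jlexer"
    )

    func main() {
        l := jlexer.Lexer{Data: []byte(`{"name":"opa","port":8181,"extra":[1,2]}`)}
        var name string
        var port int
        l.Delim('{')
        for !l.IsDelim('}') {
            key := l.UnsafeFieldName(false)
            l.WantColon()
            switch key {
            case "name":
                name = l.String()
            case "port":
                port = l.Int()
            default:
                l.SkipRecursive() // unknown fields are skipped, not validated
            }
            l.WantComma()
        }
        l.Delim('}')
        l.Consumed()
        fmt.Println(name, port, l.Error()) // opa 8181 <nil>
    }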
+func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "" + } + var ret string + if r.token.byteValueCloned { + ret = bytesToStr(r.token.byteValue) + } else { + ret = string(r.token.byteValue) + } + r.consume() + return ret +} + +// StringIntern reads a string literal, and performs string interning on it. +func (r *Lexer) StringIntern() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "" + } + ret := intern.Bytes(r.token.byteValue) + r.consume() + return ret +} + +// Bytes reads a string literal and base64 decodes it into a byte slice. +func (r *Lexer) Bytes() []byte { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return nil + } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } + ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) + n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + if err != nil { + r.fatalError = &LexerError{ + Reason: err.Error(), + } + return nil + } + + r.consume() + return ret[:n] +} + +// Bool reads a true or false boolean keyword. +func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := 
strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64Str() uint64 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) UintptrStr() uintptr { + return uintptr(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return float32(n) +} + +func (r *Lexer) Float32Str() float32 { + s, b := r.unsafeString(false) 
+ if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Float64Str() float64 { + s, b := r.unsafeString(false) + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) Error() error { + return r.fatalError +} + +func (r *Lexer) AddError(e error) { + if r.fatalError == nil { + r.fatalError = e + } +} + +func (r *Lexer) AddNonFatalError(e error) { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + Reason: e.Error(), + }) +} + +func (r *Lexer) addNonfatalError(err *LexerError) { + if r.UseMultipleErrors { + // We don't want to add errors with the same offset. + if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { + return + } + r.multipleErrors = append(r.multipleErrors, err) + return + } + r.fatalError = err +} + +func (r *Lexer) GetNonFatalErrors() []*LexerError { + return r.multipleErrors +} + +// JsonNumber fetches and json.Number from 'encoding/json' package. +// Both int, float or string, contains them are valid values +func (r *Lexer) JsonNumber() json.Number { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() { + r.errInvalidToken("json.Number") + return json.Number("") + } + + switch r.token.kind { + case tokenString: + return json.Number(r.String()) + case tokenNumber: + return json.Number(r.Raw()) + case tokenNull: + r.Null() + return json.Number("") + default: + r.errSyntax() + return json.Number("") + } +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. +func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + ret := []interface{}{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. +func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. 
+func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 000000000..2c5b20105 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,405 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but +// Flags field in Writer is used to set and pass them around. +type Flags int + +const ( + NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. + NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. +) + +// Writer is a JSON writer. +type Writer struct { + Flags Flags + + Error error + Buffer buffer.Buffer + NoEscapeHTML bool +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice +// as argument that it will try to reuse. +func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(reuse...), nil +} + +// ReadCloser returns an io.ReadCloser that can be used to read the data. +// ReadCloser also resets the buffer. +func (w *Writer) ReadCloser() (io.ReadCloser, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.ReadCloser(), nil +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. +func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +// RawText encloses raw binary data in quotes and appends in to the buffer. +// Useful for calling with results of MarshalText-like functions. 
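The writer mirrors the lexer: generated encoders append tokens piecewise and then flush the buffer. A small hand-written sketch, with an illustrative field name and value:

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jwriter"
    )

    func main() {
        var w jwriter.Writer
        w.RawByte('{')
        w.RawString(`"port":`)
        w.Int(8181)
        w.RawByte('}')
        buf, err := w.BuildBytes() // single slice, or the first error recorded on w
        fmt.Println(string(buf), err) // {"port":8181} <nil>
    }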
+func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + w.base64(data) + w.Buffer.AppendByte('"') +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintptrStr(n uintptr) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) 
{ + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float32Str(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Float64Str(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func getTable(falseValues ...int) [128]bool { + table := [128]bool{} + + for i := 0; i < 128; i++ { + table[i] = true + } + + for _, v := range falseValues { + table[v] = false + } + + return table +} + +var ( + htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') + htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') +) + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. 
+ + p := 0 // last non-escape symbol + + escapeTable := &htmlEscapeTable + if w.NoEscapeHTML { + escapeTable = &htmlNoEscapeTable + } + + for i := 0; i < len(s); { + c := s[i] + + if c < utf8.RuneSelf { + if escapeTable[c] { + // single-width character, no escaping is required + i++ + continue + } + + w.Buffer.AppendString(s[p:i]) + switch c { + case '\t': + w.Buffer.AppendString(`\t`) + case '\r': + w.Buffer.AppendString(`\r`) + case '\n': + w.Buffer.AppendString(`\n`) + case '\\': + w.Buffer.AppendString(`\\`) + case '"': + w.Buffer.AppendString(`\"`) + default: + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} + +const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" +const padChar = '=' + +func (w *Writer) base64(in []byte) { + + if len(in) == 0 { + return + } + + w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) + + si := 0 + n := (len(in) / 3) * 3 + + for si < n { + // Convert 3x 8bit source bytes into 4 bytes + val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) + + si += 3 + } + + remain := len(in) - si + if remain == 0 { + return + } + + // Add the remaining small block + val := uint(in[si+0]) << 16 + if remain == 2 { + val |= uint(in[si+1]) << 8 + } + + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) + + switch remain { + case 2: + w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) + case 1: + w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) + } +} diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE new file mode 100644 index 000000000..bbc7b897c --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile new file mode 100644 index 000000000..e33ee1730 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt new file mode 100644 index 000000000..7723656d5 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. 
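For illustration only (not part of the generated godoc above), a minimal call
sketch with a made-up Accept header; it assumes the package is imported as
github.com/munnerz/goautoneg:

    header := "text/html;q=0.5, application/json"
    ct := goautoneg.Negotiate(header, []string{"text/html", "application/json"})
    // ct is "application/json": its clause carries the default q of 1.0,
    // which outranks text/html at q=0.5.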
+ +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go new file mode 100644 index 000000000..1dd1cad64 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -0,0 +1,189 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// acceptSlice is defined to implement sort interface. 
+type acceptSlice []Accept + +func (slice acceptSlice) Len() int { + return len(slice) +} + +func (slice acceptSlice) Less(i, j int) bool { + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (slice acceptSlice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) + + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) + + a := Accept{ + Q: 1.0, + } + + sp, remainingPart := nextSplitElement(part, ";") + + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) + + switch { + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } + default: + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) + } + + if len(remainingPart) == 0 { + accept = append(accept, a) + continue + } + + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { + continue + } + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp1, 32) + } else { + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) + } + } + + accept = append(accept, a) + } + + sort.Sort(accept) + return accept +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml new file mode 100644 index 000000000..8902bdaaf --- /dev/null +++ b/vendor/github.com/x448/float16/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.11.x + +env: + - GO111MODULE=on + +script: + - go test -short -coverprofile=coverage.txt -covermode=count ./... 
+ +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE new file mode 100644 index 000000000..bf6e35785 --- /dev/null +++ b/vendor/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md new file mode 100644 index 000000000..b524b8135 --- /dev/null +++ b/vendor/github.com/x448/float16/README.md @@ -0,0 +1,133 @@ +# Float16 (Binary16) in Go/Golang +[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16) +[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16) +[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16) +[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE) + +`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16. + +IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result. + +All possible 4+ billion floating-point conversions with this library are verified to be correct. + +Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library. + +## Features +Current features include: + +* float16 to float32 conversions use lossless conversion. +* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven". +* conversions using pure Go take about 2.65 ns/op on a desktop amd64. +* unit tests provide 100% code coverage and check all possible 4+ billion conversions. +* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc. +* all functions in this library use zero allocs except String(). 
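As a rough illustration of the lossless float16→float32 claim above (a sketch by way of example, not part of the upstream README), the round trip through `Frombits`, `Float32`, and `Fromfloat32` can be checked over every binary16 bit pattern; NaNs are skipped because `Fromfloat32` always sets the quiet bit:

```
package main

import (
	"fmt"

	"github.com/x448/float16"
)

func main() {
	mismatches := 0
	for i := 0; i <= 0xFFFF; i++ {
		f16 := float16.Frombits(uint16(i))
		if f16.IsNaN() {
			// Fromfloat32 always sets the quiet bit, so signaling-NaN
			// payloads are not preserved bit-for-bit; skip them here.
			continue
		}
		// float16 -> float32 is lossless, so converting back must
		// reproduce the original bit pattern exactly.
		if back := float16.Fromfloat32(f16.Float32()); back.Bits() != f16.Bits() {
			mismatches++
		}
	}
	fmt.Println("mismatched bit patterns:", mismatches) // expected: 0
}
```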
+ +## Status +This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published. + +Current status: + +* core API is done and breaking API changes are unlikely. +* 100% of unit tests pass: + * short mode (`go test -short`) tests around 65765 conversions in 0.005s. + * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s. +* 100% code coverage with both short mode and normal mode. +* tested on amd64 but it should work on all little-endian platforms supported by Go. + +Roadmap: + +* add functions for fast batch conversions leveraging SIMD when supported by hardware. +* speed up unit test when verifying all possible 4+ billion conversions. +* test on additional platforms. + +## Float16 to Float32 Conversion +Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct. + +Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions. + +## Float32 to Float16 Conversion +Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct. + +Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32(). + +Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage. + +## Usage +Install with `go get github.com/x448/float16`. +``` +// Convert float32 to float16 +pi := float32(math.Pi) +pi16 := float16.Fromfloat32(pi) + +// Convert float16 to float32 +pi32 := pi16.Float32() + +// PrecisionFromfloat32() is faster than the overhead of calling a function. +// This example only converts if there's no data loss and input is not a subnormal. +if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact { + pi16 := float16.Fromfloat32(pi) +} +``` + +## Float16 Type and API +Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods. +``` +package float16 // import "github.com/x448/float16" + +// Exported types and consts +type Float16 uint16 +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +// Exported functions +Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding + with identical results to AMD and Intel F16C hardware. NaN inputs + are converted with quiet bit always set on, to be like F16C. + +FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit. + // The "ps" suffix means "preserve signaling". + // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN. + +Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.) 
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number +Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign + +PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow + // (inline and < 1 ns/op) +// Exported methods +(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion +(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f +(f Float16) IsNaN() bool // true if f is not-a-number (NaN) +(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN) +(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf) +(f Float16) IsFinite() bool // true if f is not infinite or NaN +(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN. +(f Float16) Signbit() bool // true if f is negative or negative zero +(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface +``` +See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info. + +## Benchmarks +Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value. + +``` +All functions have zero allocations except float16.String(). + +FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16 +ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32 +Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16 + +PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc. +``` + +## System Requirements +* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions. +* Tested on amd64 but it should also work on all little-endian platforms supported by Go. + +## Special Thanks +Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16. + +## License +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Licensed under [MIT License](LICENSE) diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go new file mode 100644 index 000000000..1a0e6dad0 --- /dev/null +++ b/vendor/github.com/x448/float16/float16.go @@ -0,0 +1,302 @@ +// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker +// +// Special thanks to Kathryn Long for her Rust implementation +// of float16 at github.com/starkat99/half-rs (MIT license) + +package float16 + +import ( + "math" + "strconv" +) + +// Float16 represents IEEE 754 half-precision floating-point numbers (binary16). +type Float16 uint16 + +// Precision indicates whether the conversion to Float16 is +// exact, subnormal without dropped bits, inexact, underflow, or overflow. +type Precision int + +const ( + + // PrecisionExact is for non-subnormals that don't drop bits during conversion. + // All of these can round-trip. Should always convert to float16. + PrecisionExact Precision = iota + + // PrecisionUnknown is for subnormals that don't drop bits during conversion but + // not all of these can round-trip so precision is unknown without more effort. + // Only 2046 of these can round-trip and the rest cannot round-trip. + PrecisionUnknown + + // PrecisionInexact is for dropped significand bits and cannot round-trip. + // Some of these are subnormals. Cannot round-trip float32->float16->float32. 
+ PrecisionInexact + + // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32. + PrecisionUnderflow + + // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32. + PrecisionOverflow +) + +// PrecisionFromfloat32 returns Precision without performing +// the conversion. Conversions from both Infinity and NaN +// values will always report PrecisionExact even if NaN payload +// or NaN-Quiet-Bit is lost. This function is kept simple to +// allow inlining and run < 0.5 ns/op, to serve as a fast filter. +func PrecisionFromfloat32(f32 float32) Precision { + u32 := math.Float32bits(f32) + + if u32 == 0 || u32 == 0x80000000 { + // +- zero will always be exact conversion + return PrecisionExact + } + + const COEFMASK uint32 = 0x7fffff // 23 least significant bits + const EXPSHIFT uint32 = 23 + const EXPBIAS uint32 = 127 + const EXPMASK uint32 = uint32(0xff) << EXPSHIFT + const DROPMASK uint32 = COEFMASK >> 10 + + exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS) + coef := u32 & COEFMASK + + if exp == 128 { + // +- infinity or NaN + // apps may want to do extra checks for NaN separately + return PrecisionExact + } + + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says, + // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24" + if exp < -24 { + return PrecisionUnderflow + } + if exp > 15 { + return PrecisionOverflow + } + if (coef & DROPMASK) != uint32(0) { + // these include subnormals and non-subnormals that dropped bits + return PrecisionInexact + } + + if exp < -14 { + // Subnormals. Caller may want to test these further. + // There are 2046 subnormals that can successfully round-trip f32->f16->f32 + // and 20 of those 2046 have 32-bit input coef == 0. + // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value" + // so some protocols and libraries will choose to handle subnormals differently + // when deciding to encode them to CBOR float32 vs float16. + return PrecisionUnknown + } + + return PrecisionExact +} + +// Frombits returns the float16 number corresponding to the IEEE 754 binary16 +// representation u16, with the sign bit of u16 and the result in the same bit +// position. Frombits(Bits(x)) == x. +func Frombits(u16 uint16) Float16 { + return Float16(u16) +} + +// Fromfloat32 returns a Float16 value converted from f32. Conversion uses +// IEEE default rounding (nearest int, with ties to even). +func Fromfloat32(f32 float32) Float16 { + return Float16(f32bitsToF16bits(math.Float32bits(f32))) +} + +// ErrInvalidNaNValue indicates a NaN was not received. +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +type float16Error string + +func (e float16Error) Error() string { return string(e) } + +// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both +// signaling and payload. Unlike Fromfloat32(), which can only return +// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN. +// If the result is infinity (sNaN with empty payload), then the +// lowest bit of payload is set to make the result a NaN. +// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN. +// This function was kept simple to be able to inline. 
+func FromNaN32ps(nan float32) (Float16, error) { + const SNAN = Float16(uint16(0x7c01)) // signalling NaN + + u32 := math.Float32bits(nan) + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if (exp != 0x7f800000) || (coef == 0) { + return SNAN, ErrInvalidNaNValue + } + + u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13)) + + if (u16 & 0x03ff) == 0 { + // result became infinity, make it NaN by setting lowest bit in payload + u16 = u16 | 0x0001 + } + + return Float16(u16), nil +} + +// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN). +// Returned NaN value 0x7e01 has all exponent bits = 1 with the +// first and last bits = 1 in the significand. This is consistent +// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00. +func NaN() Float16 { + return Float16(0x7e01) +} + +// Inf returns a Float16 with an infinity value with the specified sign. +// A sign >= returns positive infinity. +// A sign < 0 returns negative infinity. +func Inf(sign int) Float16 { + if sign >= 0 { + return Float16(0x7c00) + } + return Float16(0x8000 | 0x7c00) +} + +// Float32 returns a float32 converted from f (Float16). +// This is a lossless conversion. +func (f Float16) Float32() float32 { + u32 := f16bitsToF32bits(uint16(f)) + return math.Float32frombits(u32) +} + +// Bits returns the IEEE 754 binary16 representation of f, with the sign bit +// of f and the result in the same bit position. Bits(Frombits(x)) == x. +func (f Float16) Bits() uint16 { + return uint16(f) +} + +// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value. +func (f Float16) IsNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) +} + +// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16 +// “not-a-number” value. +func (f Float16) IsQuietNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0) +} + +// IsInf reports whether f is an infinity (inf). +// A sign > 0 reports whether f is positive inf. +// A sign < 0 reports whether f is negative inf. +// A sign == 0 reports whether f is either inf. +func (f Float16) IsInf(sign int) bool { + return ((f == 0x7c00) && sign >= 0) || + (f == 0xfc00 && sign <= 0) +} + +// IsFinite returns true if f is neither infinite nor NaN. +func (f Float16) IsFinite() bool { + return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00) +} + +// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN. +func (f Float16) IsNormal() bool { + exp := uint16(f) & uint16(0x7c00) + return (exp != uint16(0x7c00)) && (exp != 0) +} + +// Signbit reports whether f is negative or negative zero. +func (f Float16) Signbit() bool { + return (uint16(f) & uint16(0x8000)) != 0 +} + +// String satisfies the fmt.Stringer interface. +func (f Float16) String() string { + return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32) +} + +// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16. +func f16bitsToF32bits(in uint16) uint32 { + // All 65536 conversions with this were confirmed to be correct + // by Montgomery Edwards⁴⁴⁸ (github.com/x448). 
+ + sign := uint32(in&0x8000) << 16 // sign for 32-bit + exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit + coef := uint32(in&0x03ff) << 13 // significand for 32-bit + + if exp == 0x1f { + if coef == 0 { + // infinity + return sign | 0x7f800000 | coef + } + // NaN + return sign | 0x7fc00000 | coef + } + + if exp == 0 { + if coef == 0 { + // zero + return sign + } + + // normalize subnormal numbers + exp++ + for coef&0x7f800000 == 0 { + coef <<= 1 + exp-- + } + coef &= 0x007fffff + } + + return sign | ((exp + (0x7f - 0xf)) << 23) | coef +} + +// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32. +// Conversion rounds to nearest integer with ties to even. +func f32bitsToF16bits(u32 uint32) uint16 { + // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448). + // All 4294967296 conversions with this were confirmed to be correct by x448. + // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license. + + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if exp == 0x7f800000 { + // NaN or Infinity + nanBit := uint32(0) + if coef != 0 { + nanBit = uint32(0x0200) + } + return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13)) + } + + halfSign := sign >> 16 + + unbiasedExp := int32(exp>>23) - 127 + halfExp := unbiasedExp + 15 + + if halfExp >= 0x1f { + return uint16(halfSign | uint32(0x7c00)) + } + + if halfExp <= 0 { + if 14-halfExp > 24 { + return uint16(halfSign) + } + coef := coef | uint32(0x00800000) + halfCoef := coef >> uint32(14-halfExp) + roundBit := uint32(1) << uint32(13-halfExp) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + halfCoef++ + } + return uint16(halfSign | halfCoef) + } + + uHalfExp := uint32(halfExp) << 10 + halfCoef := coef >> 13 + roundBit := uint32(0x00001000) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + return uint16((halfSign | uHalfExp | halfCoef) + 1) + } + return uint16(halfSign | uHalfExp | halfCoef) +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4..000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. 
Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cfdb..000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. 
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b86793..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. 
-func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a90..000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. 
-func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803..000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. 
Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e852..9b4de9401 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 43557ab7e..105c3b279 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. - return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } // Also close the connection after any CONTINUATION frame following an @@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. 
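A minimal sketch of the ReadFrame contract documented in the hunk above: when an error comes back together with a non-nil Frame, the frame header identifies the stream at fault. The connection and logging below are placeholders, not code from the patch.

package example

import (
	"log"
	"net"

	"golang.org/x/net/http2"
)

// readFrames drains frames from conn until an error occurs.
func readFrames(conn net.Conn) error {
	fr := http2.NewFramer(conn, conn) // writer, reader
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			if f != nil {
				// New in this update: the partial frame carries the stream ID
				// responsible for the error.
				log.Printf("frame error on stream %d: %v", f.Header().StreamID, err)
			}
			return err
		}
		_ = f // dispatch the frame here
	}
}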
- return nil, ConnectionError(ErrCodeProtocol) + return mh, ConnectionError(ErrCodeProtocol) } if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df2818..003e649f3 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ce2e8b40e..6c349f3ec 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -154,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. 
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -599,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -649,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -732,11 +772,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -815,8 +851,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -847,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. 
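The isClosedConnError change above swaps the old substring match for the net.ErrClosed sentinel added in Go 1.16. A sketch of the same idiom in caller code (conn and buf are placeholders):

package example

import (
	"errors"
	"net"
)

// readOnce treats a read on an already-closed connection as a clean shutdown
// instead of matching the "use of closed network connection" string.
func readOnce(conn net.Conn, buf []byte) (int, error) {
	n, err := conn.Read(buf)
	if err != nil && errors.Is(err, net.ErrClosed) {
		return n, nil
	}
	return n, err
}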
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -926,13 +964,13 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1061,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1429,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1482,6 +1520,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1638,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1660,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2020,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2118,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2342,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2638,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2760,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2776,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2786,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2802,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd16..000000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. 
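The SetReadDeadline/SetWriteDeadline hunks above now read the current time from the server's injectable clock. From handler code these methods are normally reached through http.ResponseController (Go 1.20+); a sketch with arbitrary timeouts:

package example

import (
	"io"
	"net/http"
	"time"
)

// handler sets per-request deadlines; on an HTTP/2 connection these calls
// land in the vendored responseWriter.SetReadDeadline / SetWriteDeadline.
func handler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	_ = rc.SetReadDeadline(time.Now().Add(5 * time.Second))   // bound request body reads
	_ = rc.SetWriteDeadline(time.Now().Add(10 * time.Second)) // bound response writes
	io.Copy(io.Discard, r.Body)
	w.WriteHeader(http.StatusNoContent)
}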
-// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? -// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. 
-func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. -func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. 
-type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 000000000..0b1c17b81 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index ce375c8c7..98a49c6b6 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -185,7 +185,45 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -352,60 +390,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. 
-// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { @@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := t.newTimer(d) select { case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) @@ -725,8 +696,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t 
*Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, } - if hooks != nil { - hooks.newclientconn(cc) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) c = cc.tconn } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) - } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } @@ -893,7 +860,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } @@ -901,7 +874,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -936,7 +909,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. 
cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1131,7 +1117,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1143,9 +1130,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1155,7 +1142,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1193,7 +1180,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1308,23 +1295,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1385,24 +1379,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1432,8 +1409,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1444,7 +1422,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. 
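The Shutdown hunk above tags its waiter goroutine with markNewGoroutine and replaces the condWait/condBroadcast indirection with plain sync.Cond calls. A sketch of the exported API from the caller's side, assuming cc was obtained from Transport.NewClientConn and the timeout is arbitrary:

package example

import (
	"context"
	"time"

	"golang.org/x/net/http2"
)

// closeConn shuts a client connection down gracefully, bounded by a timeout.
func closeConn(cc *http2.ClientConn) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return cc.Shutdown(ctx)
}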
-func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1458,21 +1436,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false - } - return true - }) - } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1497,28 +1460,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1581,7 +1524,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv @@ -1590,21 +1533,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1753,7 +1681,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2015,7 +1943,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() + cc.cond.Wait() } } @@ -2298,7 +2226,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2320,6 +2248,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
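The readLoop hunk just below arms the connection health check through the new transport timer hooks; enabling it from application code is unchanged and looks roughly like this (the durations are arbitrary):

package example

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

// newClient returns an HTTP client whose HTTP/2 transport pings idle
// connections and drops them when the ping goes unanswered.
func newClient() *http.Client {
	t := &http2.Transport{
		ReadIdleTimeout: 30 * time.Second, // send a ping if no frame arrives for 30s
		PingTimeout:     10 * time.Second, // close the connection if the ping is unanswered
	}
	return &http.Client{Transport: t}
}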
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2386,7 +2315,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2423,7 +2352,7 @@ func (rl *clientConnReadLoop) run() error { readIdleTimeout := cc.t.ReadIdleTimeout var t timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -3021,7 +2950,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -3076,7 +3005,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } @@ -3120,7 +3049,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3131,20 +3061,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c669..f6783339d 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 1473e1296..781770c20 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) -## Policy for new packages +## Policy for new endpoints We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a @@ -29,8 +29,12 @@ package. ## Report Issues / Send Patches -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. 
+* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000..e99c92f39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go deleted file mode 100644 index e1755d1d9..000000000 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
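A sketch of the RFC 8628 device flow using the Config.DeviceAuth and Config.DeviceAccessToken methods added in deviceauth.go above; the endpoint URLs, client ID, and scope are placeholders.

package example

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

// deviceLogin requests a device code, shows the user where to enter it, and
// then polls the token endpoint until the user approves or the code expires.
func deviceLogin(ctx context.Context) (*oauth2.Token, error) {
	cfg := &oauth2.Config{
		ClientID: "example-client-id",
		Scopes:   []string{"openid"},
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://auth.example.com/oauth/device/code",
			TokenURL:      "https://auth.example.com/oauth/token",
		},
	}
	da, err := cfg.DeviceAuth(ctx)
	if err != nil {
		return nil, err
	}
	fmt.Printf("visit %s and enter code %s\n", da.VerificationURI, da.UserCode)
	return cfg.DeviceAccessToken(ctx, da)
}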
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package internal - -import "google.golang.org/appengine/urlfetch" - -func init() { - appengineClientHook = urlfetch.Client -} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index c0ab196cf..14989beaf 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -14,7 +14,7 @@ import ( // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key +// PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. func ParseKey(key []byte) (*rsa.PrivateKey, error) { diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 355c38696..e83ddeef0 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,9 +18,8 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -57,12 +56,18 @@ type Token struct { } // tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. +// providers returning a token or error in JSON form. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` } func (e *tokenJSON) expiry() (t time.Time) { @@ -111,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. 
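With the App Engine urlfetch hook deleted above (and ContextClient trimmed to match in a later hunk), token requests fall back to http.DefaultClient unless a client is supplied through the exported oauth2.HTTPClient context key. A sketch, with an arbitrary timeout and placeholder names:

package example

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// exchangeWithCustomClient exchanges an authorization code using a caller-
// supplied HTTP client instead of http.DefaultClient.
func exchangeWithCustomClient(cfg *oauth2.Config, code string) (*oauth2.Token, error) {
	hc := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)
	return cfg.Exchange(ctx, code)
}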
-var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -185,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -218,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. 
@@ -229,7 +253,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } @@ -238,21 +262,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string vals, err := url.ParseQuery(string(body)) if err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), @@ -267,8 +299,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, @@ -278,17 +316,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } if token.AccessToken == "" { return nil, errors.New("oauth2: server response missing access_token") } return token, nil } +// mirrors oauth2.RetrieveError type RetrieveError struct { - Response *http.Response - Body []byte + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 572074a63..b9db01ddf 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -18,16 +18,11 @@ var HTTPClient ContextKey // because nobody else can create a ContextKey, being unexported. 
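The token-endpoint changes above now populate the RFC 6749 error fields on RetrieveError; the exported oauth2.RetrieveError mirrors the internal type, so callers can inspect them roughly as sketched here (cfg and code are placeholders):

package example

import (
	"context"
	"errors"
	"log"

	"golang.org/x/oauth2"
)

// exchange logs the structured OAuth2 error, if any, before returning it.
func exchange(ctx context.Context, cfg *oauth2.Config, code string) (*oauth2.Token, error) {
	tok, err := cfg.Exchange(ctx, code)
	if err != nil {
		var rerr *oauth2.RetrieveError
		if errors.As(err, &rerr) {
			log.Printf("token endpoint error %q: %s", rerr.ErrorCode, rerr.ErrorDescription)
		}
		return nil, err
	}
	return tok, nil
}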
type ContextKey struct{} -var appengineClientHook func(context.Context) *http.Client - func ContextClient(ctx context.Context) *http.Client { if ctx != nil { if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { return hc } } - if appengineClientHook != nil { - return appengineClientHook(ctx) - } return http.DefaultClient } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 291df5c83..09f6a49b8 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -16,6 +16,7 @@ import ( "net/url" "strings" "sync" + "time" "golang.org/x/oauth2/internal" ) @@ -57,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -70,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -138,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -161,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -206,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). 
+// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, @@ -290,6 +300,8 @@ type reuseTokenSource struct { mu sync.Mutex // guards t t *Token + + expiryDelta time.Duration } // Token returns the current token if it's still valid, else will @@ -305,6 +317,7 @@ func (s *reuseTokenSource) Token() (*Token, error) { if err != nil { return nil, err } + t.expiryDelta = s.expiryDelta s.t = t return t, nil } @@ -379,3 +392,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource { new: src, } } + +// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the +// TokenSource returned by ReuseTokenSource, except the expiry buffer is +// configurable. The expiration time of a token is calculated as +// t.Expiry.Add(-earlyExpiry). +func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly, but set the expiryDelta to earlyExpiry, + // so the behavior matches what the user expects. + rt.expiryDelta = earlyExpiry + return rt + } + src = rt.new + } + if t != nil { + t.expiryDelta = earlyExpiry + } + return &reuseTokenSource{ + t: t, + new: src, + expiryDelta: earlyExpiry, + } +} diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 000000000..50593b6df --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. 
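ReuseTokenSourceWithExpiry, added above, is new exported API: it behaves like ReuseTokenSource but lets the caller widen the refresh-ahead buffer beyond the default ten seconds. A short usage sketch, assuming a stored refresh token; the client credentials, token URL, and refresh-token value are placeholders:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	cfg := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		Endpoint: oauth2.Endpoint{
			TokenURL: "https://issuer.example/token", // placeholder
		},
	}

	// base refreshes access tokens using the stored refresh token.
	base := cfg.TokenSource(ctx, &oauth2.Token{RefreshToken: "stored-refresh-token"})

	// Treat tokens as expired five minutes early instead of the default
	// 10-second buffer, so long-running callers refresh well before expiry.
	src := oauth2.ReuseTokenSourceWithExpiry(nil, base, 5*time.Minute)

	tok, err := src.Token()
	if err != nil {
		fmt.Println("token fetch failed:", err)
		return
	}
	fmt.Println("token expires at", tok.Expiry)
}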
+func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 822720341..5bbb33217 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -16,10 +16,10 @@ import ( "golang.org/x/oauth2/internal" ) -// expiryDelta determines how earlier a token should be considered +// defaultExpiryDelta determines how earlier a token should be considered // expired than its actual expiration time. It is used to avoid late // expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second +const defaultExpiryDelta = 10 * time.Second // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 @@ -52,6 +52,11 @@ type Token struct { // raw optionally contains extra metadata from the server // when updating a token. raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. + expiryDelta time.Duration } // Type returns t.TokenType if non-empty, else "Bearer". @@ -127,6 +132,11 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } + + expiryDelta := defaultExpiryDelta + if t.expiryDelta != 0 { + expiryDelta = t.expiryDelta + } return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) } @@ -154,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) @@ -165,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) } // RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code. +// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 type RetrieveError struct { Response *http.Response // Body is the body that was consumed by reading Response.Body. // It may be truncated. 
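The new pkce.go above gives the package first-class PKCE helpers. A sketch of the intended round trip with the three exported helpers shown above; the endpoint URLs, client ID, and callback code are placeholders:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "client-id", // placeholder values throughout
		RedirectURL: "https://app.example/callback",
		Scopes:      []string{"openid"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://issuer.example/authorize",
			TokenURL: "https://issuer.example/token",
		},
	}

	// One fresh verifier per authorization attempt; keep it (e.g. in the
	// user's session) until the callback arrives.
	verifier := oauth2.GenerateVerifier()

	// Send the S256 challenge with the authorization request.
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// After the redirect, exchange the code together with the verifier.
	code := "code-from-callback" // placeholder
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Println("access token type:", tok.Type())
}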
Body []byte + // ErrorCode is RFC 6749's 'error' parameter. + ErrorCode string + // ErrorDescription is RFC 6749's 'error_description' parameter. + ErrorDescription string + // ErrorURI is RFC 6749's 'error_uri' parameter. + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index b0b982e9c..f0e0cf3cb 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. 
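With the token.go hunks above, a failed exchange can surface the RFC 6749 error, error_description, and error_uri values on oauth2.RetrieveError instead of only the HTTP status and raw body. A small sketch of inspecting them; describeTokenError is a hypothetical helper and the simulated error stands in for one returned by Exchange or a TokenSource:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/oauth2"
)

// describeTokenError surfaces the RFC 6749 detail fields when present,
// falling back to the HTTP status otherwise.
func describeTokenError(err error) string {
	var rErr *oauth2.RetrieveError
	if errors.As(err, &rErr) {
		if rErr.ErrorCode != "" {
			// e.g. "invalid_grant: refresh token expired"
			return fmt.Sprintf("%s: %s", rErr.ErrorCode, rErr.ErrorDescription)
		}
		return fmt.Sprintf("token endpoint returned %s", rErr.Response.Status)
	}
	return err.Error()
}

func main() {
	// Simulated failure; in practice err comes from Exchange or Token().
	err := fmt.Errorf("fetching token: %w", &oauth2.RetrieveError{
		ErrorCode:        "invalid_grant",
		ErrorDescription: "refresh token expired",
	})
	fmt.Println(describeTokenError(err))
}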
-func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -196,18 +209,20 @@ func (lim *Limiter) Reserve() *Reservation { // The Limiter takes this Reservation into account when allowing future events. // The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. // Usage example: -// r := lim.ReserveN(time.Now(), 1) -// if !r.OK() { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// return -// } -// time.Sleep(r.Delay()) -// Act() +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -221,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. 
+func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -236,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -273,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -290,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -304,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -313,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -325,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. 
tokens -= float64(n) @@ -351,16 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) - } + r.timeToAct = t.Add(waitDuration) - // Update state - if ok { - lim.last = now + // Update state + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -369,20 +392,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 000000000..6ba99ddb6 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. 
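The rate.go hunks above mostly rename the now parameter to t, make WaitN's timer injectable for tests, and simplify advance and reserveN; the observable Limiter behaviour is unchanged, and Tokens/TokensAt are the only new exported methods. For context, a small sketch of the Limiter API these changes touch; the rate, burst, and timeout values are illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 10 events per second, bursts of up to 5.
	lim := rate.NewLimiter(rate.Limit(10), 5)

	// Allow: drop work that exceeds the limit.
	if lim.Allow() {
		fmt.Println("handled immediately")
	}

	// Tokens (added in this update) reports the burst currently available.
	fmt.Printf("tokens remaining: %.1f\n", lim.Tokens())

	// Wait: block until the limiter grants a token or the context ends.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("rate limited:", err)
		return
	}
	fmt.Println("handled after waiting")
}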
+func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index 721053c20..000000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,678 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id") - - // Outgoing headers. - apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - MaxIdleConns: 1000, - MaxIdleConnsPerHost: 10000, - IdleConnTimeout: 90 * time.Second, - }, - } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. 
- r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. - const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -// jointContext joins two contexts in a superficial way. -// It takes values and timeouts from a base context, and only values from another context. 
-type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context -} - -func (c jointContext) Deadline() (time.Time, bool) { - return c.base.Deadline() -} - -func (c jointContext) Done() <-chan struct{} { - return c.base.Done() -} - -func (c jointContext) Err() error { - return c.base.Err() -} - -func (c jointContext) Value(key interface{}) interface{} { - if val := c.base.Value(key); val != nil { - return val - } - return c.valuesOnly.Value(key) -} - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return req.Context() -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - return jointContext{ - base: parent, - valuesOnly: req.Context(), - } -} - -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). 
-func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) - return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. - if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. 
- if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. - const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation - if !IsSecondGen() { - log.Print(logLevelName[level] + ": " + s) - } -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
- c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index f0f40b2e3..000000000 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import ( - "errors" - "fmt" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var contextKey = "holds an appengine.Context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { - c := fromContext(ctx) - if c == nil { - return nil, errNotAppEngineContext - } - return c, nil -} - -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -type testingContext struct { - appengine.Context - - req *http.Request -} - -func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } -func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { - if service == "__go__" && method == "GetNamespace" { - return nil - } - return fmt.Errorf("testingContext: unsupported Call") -} -func (t *testingContext) Request() interface{} { return t.req } - -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) 
-} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index e0c0b214b..000000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "errors" - "os" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var errNotAppEngineContext = errors.New("not an App Engine context") - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. - f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. - ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - logf(c, level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - return withNamespace(ctx, namespace) -} - -// SetTestEnv sets the env variables for testing background ticket in Flex. 
-func SetTestEnv() func() { - var environ = []struct { - key, value string - }{ - {"GAE_LONG_APP_ID", "my-app-id"}, - {"GAE_MINOR_VERSION", "067924799508853122"}, - {"GAE_MODULE_INSTANCE", "0"}, - {"GAE_MODULE_NAME", "default"}, - {"GAE_MODULE_VERSION", "20150612t184001"}, - } - - for _, v := range environ { - old := os.Getenv(v.key) - os.Setenv(v.key, v.value) - v.value = old - } - return func() { // Restore old environment after the test completes. - for _, v := range environ { - if v.value == "" { - os.Unsetenv(v.key) - continue - } - os.Setenv(v.key, v.value) - } - } -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c07b..000000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". -func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index db4777e68..000000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/base/api_base.proto - -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} -func (*StringProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{0} -} -func (m *StringProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringProto.Unmarshal(m, b) -} -func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringProto.Marshal(b, m, deterministic) -} -func (dst *StringProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringProto.Merge(dst, src) -} -func (m *StringProto) XXX_Size() int { - return xxx_messageInfo_StringProto.Size(m) -} -func (m *StringProto) XXX_DiscardUnknown() { - xxx_messageInfo_StringProto.DiscardUnknown(m) -} - -var xxx_messageInfo_StringProto proto.InternalMessageInfo - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} -func (*Integer32Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{1} -} -func (m *Integer32Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer32Proto.Unmarshal(m, b) -} -func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic) -} -func (dst *Integer32Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer32Proto.Merge(dst, src) -} -func (m *Integer32Proto) XXX_Size() int { - return xxx_messageInfo_Integer32Proto.Size(m) -} -func (m *Integer32Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer32Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} -func (*Integer64Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{2} -} -func (m *Integer64Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer64Proto.Unmarshal(m, b) -} -func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic) -} -func (dst *Integer64Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer64Proto.Merge(dst, src) -} -func (m *Integer64Proto) XXX_Size() int 
{ - return xxx_messageInfo_Integer64Proto.Size(m) -} -func (m *Integer64Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer64Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} -func (*BoolProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{3} -} -func (m *BoolProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoolProto.Unmarshal(m, b) -} -func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic) -} -func (dst *BoolProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolProto.Merge(dst, src) -} -func (m *BoolProto) XXX_Size() int { - return xxx_messageInfo_BoolProto.Size(m) -} -func (m *BoolProto) XXX_DiscardUnknown() { - xxx_messageInfo_BoolProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BoolProto proto.InternalMessageInfo - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} -func (*DoubleProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{4} -} -func (m *DoubleProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleProto.Unmarshal(m, b) -} -func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic) -} -func (dst *DoubleProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleProto.Merge(dst, src) -} -func (m *DoubleProto) XXX_Size() int { - return xxx_messageInfo_DoubleProto.Size(m) -} -func (m *DoubleProto) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleProto proto.InternalMessageInfo - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} -func (*BytesProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{5} -} -func (m *BytesProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BytesProto.Unmarshal(m, b) -} -func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic) -} -func (dst 
*BytesProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesProto.Merge(dst, src) -} -func (m *BytesProto) XXX_Size() int { - return xxx_messageInfo_BytesProto.Size(m) -} -func (m *BytesProto) XXX_DiscardUnknown() { - xxx_messageInfo_BytesProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesProto proto.InternalMessageInfo - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} -func (*VoidProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{6} -} -func (m *VoidProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VoidProto.Unmarshal(m, b) -} -func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic) -} -func (dst *VoidProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoidProto.Merge(dst, src) -} -func (m *VoidProto) XXX_Size() int { - return xxx_messageInfo_VoidProto.Size(m) -} -func (m *VoidProto) XXX_DiscardUnknown() { - xxx_messageInfo_VoidProto.DiscardUnknown(m) -} - -var xxx_messageInfo_VoidProto proto.InternalMessageInfo - -func init() { - proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto") - proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto") - proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto") - proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto") - proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto") - proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto") - proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140) -} - -var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{ - // 199 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30, - 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40, - 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6, - 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62, - 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71, - 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe, - 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1, - 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71, - 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d, - 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff, - 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d, - 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba, - 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto 
b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3ca..000000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 2fb748289..000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,4367 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto - -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: "ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 
18, -} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} -func (Property_Meaning) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0} -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} -func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1} -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - "GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} -func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0} -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} -func (Index_Property_Direction) EnumDescriptor() 
([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0} -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State = 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} -func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0} -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} -func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0} -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - *p = x - return p -} -func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} -func (Query_Hint) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: 
"EXISTS", -} -var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} -func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0} -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() *Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} -func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0} -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - "CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} -func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) { - return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0} -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} -func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0} -} - -type BeginTransactionRequest_TransactionMode int32 - -const ( - BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0 - BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1 - BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2 -) - -var BeginTransactionRequest_TransactionMode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READ_ONLY", - 2: "READ_WRITE", -} -var BeginTransactionRequest_TransactionMode_value = map[string]int32{ - "UNKNOWN": 0, - "READ_ONLY": 1, - "READ_WRITE": 2, -} - -func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode { - p := new(BeginTransactionRequest_TransactionMode) - *p = x - return p -} -func (x BeginTransactionRequest_TransactionMode) String() string { - return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x)) -} -func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode") - if err != nil { - return err - } - *x = BeginTransactionRequest_TransactionMode(value) - return nil -} -func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0} -} - -type Action struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} -func (*Action) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0} -} -func (m *Action) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Action.Unmarshal(m, b) -} -func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Action.Marshal(b, m, deterministic) -} -func (dst *Action) XXX_Merge(src proto.Message) { - xxx_messageInfo_Action.Merge(dst, src) -} -func (m *Action) XXX_Size() int { - return xxx_messageInfo_Action.Size(m) -} -func (m *Action) XXX_DiscardUnknown() { - xxx_messageInfo_Action.DiscardUnknown(m) -} - -var xxx_messageInfo_Action proto.InternalMessageInfo - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool 
`protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} -func (*PropertyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1} -} -func (m *PropertyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue.Unmarshal(m, b) -} -func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue.Merge(dst, src) -} -func (m *PropertyValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue.Size(m) -} -func (m *PropertyValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue proto.InternalMessageInfo - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} -func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0} -} -func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b) -} -func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src) -} -func (m *PropertyValue_PointValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_PointValue.Size(m) -} -func (m *PropertyValue_PointValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} -func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1} -} -func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b) -} -func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src) -} -func (m *PropertyValue_UserValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_UserValue.Size(m) -} -func (m *PropertyValue_UserValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Pathelement 
[]*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} -func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2} -} -func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m) -} -func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} -func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0} -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m) -} - -var 
xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} -func (*Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2} -} -func (m *Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Property.Unmarshal(m, b) -} -func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Property.Marshal(b, m, deterministic) -} -func (dst *Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Property.Merge(dst, src) -} -func (m *Property) XXX_Size() int { - return xxx_messageInfo_Property.Size(m) -} -func (m *Property) XXX_DiscardUnknown() { - xxx_messageInfo_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Property proto.InternalMessageInfo - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - 
if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3} -} -func (m *Path) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path.Unmarshal(m, b) -} -func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path.Marshal(b, m, deterministic) -} -func (dst *Path) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path.Merge(dst, src) -} -func (m *Path) XXX_Size() int { - return xxx_messageInfo_Path.Size(m) -} -func (m *Path) XXX_DiscardUnknown() { - xxx_messageInfo_Path.DiscardUnknown(m) -} - -var xxx_messageInfo_Path proto.InternalMessageInfo - -func (m *Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} -func (*Path_Element) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0} -} -func (m *Path_Element) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path_Element.Unmarshal(m, b) -} -func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic) -} -func (dst *Path_Element) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path_Element.Merge(dst, src) -} -func (m *Path_Element) XXX_Size() int { - return xxx_messageInfo_Path_Element.Size(m) -} -func (m *Path_Element) XXX_DiscardUnknown() { - xxx_messageInfo_Path_Element.DiscardUnknown(m) -} - -var xxx_messageInfo_Path_Element proto.InternalMessageInfo - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} -func (*Reference) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4} -} -func (m *Reference) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_Reference.Unmarshal(m, b) -} -func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Reference.Marshal(b, m, deterministic) -} -func (dst *Reference) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reference.Merge(dst, src) -} -func (m *Reference) XXX_Size() int { - return xxx_messageInfo_Reference.Size(m) -} -func (m *Reference) XXX_DiscardUnknown() { - xxx_messageInfo_Reference.DiscardUnknown(m) -} - -var xxx_messageInfo_Reference proto.InternalMessageInfo - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_User.Unmarshal(m, b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_User.Marshal(b, m, deterministic) -} -func (dst *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(dst, src) -} -func (m *User) XXX_Size() int { - return xxx_messageInfo_User.Size(m) -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" 
json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EntityProto) Reset() { *m = EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} -func (*EntityProto) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6} -} -func (m *EntityProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EntityProto.Unmarshal(m, b) -} -func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic) -} -func (dst *EntityProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EntityProto.Merge(dst, src) -} -func (m *EntityProto) XXX_Size() int { - return xxx_messageInfo_EntityProto.Size(m) -} -func (m *EntityProto) XXX_DiscardUnknown() { - xxx_messageInfo_EntityProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EntityProto proto.InternalMessageInfo - -func (m *EntityProto) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} -func (*CompositeProperty) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7} -} -func (m *CompositeProperty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeProperty.Unmarshal(m, b) -} -func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic) -} -func (dst *CompositeProperty) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeProperty.Merge(dst, src) -} -func (m *CompositeProperty) XXX_Size() int { - return xxx_messageInfo_CompositeProperty.Size(m) -} -func (m *CompositeProperty) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeProperty.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { 
- return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) ProtoMessage() {} -func (*Index) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8} -} -func (m *Index) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index.Unmarshal(m, b) -} -func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index.Marshal(b, m, deterministic) -} -func (dst *Index) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index.Merge(dst, src) -} -func (m *Index) XXX_Size() int { - return xxx_messageInfo_Index.Size(m) -} -func (m *Index) XXX_DiscardUnknown() { - xxx_messageInfo_Index.DiscardUnknown(m) -} - -var xxx_messageInfo_Index proto.InternalMessageInfo - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} -func (*Index_Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0} -} -func (m *Index_Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index_Property.Unmarshal(m, b) -} -func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic) -} -func (dst *Index_Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index_Property.Merge(dst, src) -} -func (m *Index_Property) XXX_Size() int { - return xxx_messageInfo_Index_Property.Size(m) -} -func (m *Index_Property) XXX_DiscardUnknown() { - xxx_messageInfo_Index_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Index_Property proto.InternalMessageInfo - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string 
`protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} -func (*CompositeIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9} -} -func (m *CompositeIndex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeIndex.Unmarshal(m, b) -} -func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic) -} -func (dst *CompositeIndex) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeIndex.Merge(dst, src) -} -func (m *CompositeIndex) XXX_Size() int { - return xxx_messageInfo_CompositeIndex.Size(m) -} -func (m *CompositeIndex) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeIndex.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo - -const Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} -func (*IndexPostfix) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10} -} -func (m *IndexPostfix) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPostfix.Unmarshal(m, b) -} -func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic) -} -func (dst *IndexPostfix) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPostfix.Merge(dst, src) -} -func (m *IndexPostfix) XXX_Size() int { - return xxx_messageInfo_IndexPostfix.Size(m) -} -func (m *IndexPostfix) XXX_DiscardUnknown() { - 
[Remainder of hunk: wholesale deletion of the vendored, machine-generated Go protobuf bindings referenced by fileDescriptor_datastore_v3_83b17b80c34f6179 (the App Engine datastore_v3 API). The removed code defines the message types IndexPostfix and IndexPostfix_IndexValue, IndexPosition, Snapshot, InternalHeader, Transaction, Query with Query_Filter and Query_Order, CompiledQuery with its PrimaryScan, MergeJoinScan and EntityFilter groups, CompiledCursor with Position and Position_IndexValue, Cursor, Error, Cost with CommitCost, GetRequest, GetResponse and GetResponse_Entity, PutRequest, PutResponse, TouchRequest, TouchResponse, DeleteRequest, DeleteResponse, NextRequest, QueryResult, AllocateIdsRequest, AllocateIdsResponse, CompositeIndices, AddActionsRequest and AddActionsResponse, together with their Reset/String/Descriptor/XXX_* plumbing, default-value constants, and getter methods. These are removal lines only; nothing replaces them, consistent with dropping the vendored google.golang.org/appengine dependency.]
XXX_DiscardUnknown() { - xxx_messageInfo_AddActionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"` - DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"` - Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"` - PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} -func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36} -} -func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b) -} -func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic) -} -func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BeginTransactionRequest.Merge(dst, src) -} -func (m *BeginTransactionRequest) XXX_Size() int { - return xxx_messageInfo_BeginTransactionRequest.Size(m) -} -func (m *BeginTransactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false -const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -func (m *BeginTransactionRequest) GetDatabaseId() string { - if m != nil && m.DatabaseId != nil { - return *m.DatabaseId - } - return "" -} - -func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode { - if m != nil && m.Mode != nil { - return *m.Mode - } - return Default_BeginTransactionRequest_Mode -} - -func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction { - if m != nil { - return m.PreviousTransaction - } - return nil -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} -func (*CommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37} -} -func (m *CommitResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse.Unmarshal(m, b) -} -func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) -} -func (dst *CommitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse.Merge(dst, src) -} -func (m *CommitResponse) XXX_Size() int { - return xxx_messageInfo_CommitResponse.Size(m) -} -func (m *CommitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse proto.InternalMessageInfo - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} -func (*CommitResponse_Version) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0} -} -func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b) -} -func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic) -} -func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse_Version.Merge(dst, src) -} -func (m *CommitResponse_Version) XXX_Size() int { - return xxx_messageInfo_CommitResponse_Version.Size(m) -} -func (m *CommitResponse_Version) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { - proto.RegisterType((*Action)(nil), "appengine.Action") - proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue") - proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue") - proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue") - proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue") - proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement") - proto.RegisterType((*Property)(nil), "appengine.Property") - proto.RegisterType((*Path)(nil), 
"appengine.Path") - proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element") - proto.RegisterType((*Reference)(nil), "appengine.Reference") - proto.RegisterType((*User)(nil), "appengine.User") - proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto") - proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty") - proto.RegisterType((*Index)(nil), "appengine.Index") - proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property") - proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex") - proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix") - proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue") - proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition") - proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot") - proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader") - proto.RegisterType((*Transaction)(nil), "appengine.Transaction") - proto.RegisterType((*Query)(nil), "appengine.Query") - proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter") - proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order") - proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery") - proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan") - proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan") - proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter") - proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor") - proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position") - proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue") - proto.RegisterType((*Cursor)(nil), "appengine.Cursor") - proto.RegisterType((*Error)(nil), "appengine.Error") - proto.RegisterType((*Cost)(nil), "appengine.Cost") - proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost") - proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest") - proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse") - proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity") - proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest") - proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse") - proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest") - proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse") - proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest") - proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse") - proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest") - proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult") - proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest") - proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse") - proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices") - proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest") - proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse") - proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest") - proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse") - proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version") -} - -func init() { - 
proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179) -} - -var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{ - // 4156 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46, - 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d, - 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48, - 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46, - 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8, - 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24, - 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd, - 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f, - 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74, - 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac, - 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8, - 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9, - 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22, - 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56, - 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b, - 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05, - 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c, - 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2, - 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16, - 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3, - 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce, - 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67, - 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66, - 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13, - 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c, - 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6, - 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a, - 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f, - 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15, - 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a, - 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b, - 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17, - 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad, - 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 
0x57, 0x5c, 0x87, 0x63, 0x50, - 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6, - 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b, - 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4, - 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2, - 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc, - 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e, - 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62, - 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee, - 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f, - 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4, - 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02, - 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae, - 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94, - 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f, - 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a, - 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52, - 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc, - 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92, - 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9, - 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50, - 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9, - 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23, - 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f, - 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87, - 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda, - 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b, - 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d, - 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f, - 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b, - 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9, - 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0, - 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68, - 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b, - 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79, - 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63, - 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 
0xe0, - 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1, - 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10, - 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b, - 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b, - 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08, - 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72, - 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23, - 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91, - 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f, - 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93, - 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c, - 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa, - 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f, - 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef, - 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4, - 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90, - 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f, - 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86, - 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d, - 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee, - 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c, - 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1, - 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7, - 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80, - 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c, - 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21, - 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6, - 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26, - 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba, - 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c, - 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2, - 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8, - 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90, - 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91, - 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58, - 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c, - 0xa9, 0xd0, 0x5a, 
0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b, - 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f, - 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02, - 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22, - 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b, - 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0, - 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18, - 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8, - 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b, - 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e, - 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84, - 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0, - 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec, - 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7, - 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60, - 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad, - 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4, - 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76, - 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0, - 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba, - 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5, - 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26, - 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60, - 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e, - 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33, - 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e, - 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e, - 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7, - 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45, - 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92, - 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb, - 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc, - 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43, - 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2, - 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5, - 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40, - 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 
0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa, - 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc, - 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8, - 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7, - 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6, - 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c, - 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a, - 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e, - 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8, - 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a, - 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55, - 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0, - 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e, - 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78, - 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8, - 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1, - 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a, - 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60, - 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf, - 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c, - 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d, - 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb, - 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f, - 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe, - 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1, - 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80, - 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2, - 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f, - 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d, - 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90, - 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb, - 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe, - 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7, - 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31, - 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe, - 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd, - 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 
0x4a, 0xf5, 0x81, 0x69, 0x99, - 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41, - 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1, - 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81, - 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63, - 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3, - 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff, - 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f, - 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14, - 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2, - 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4, - 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1, - 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b, - 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9, - 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18, - 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e, - 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9, - 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95, - 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30, - 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c, - 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73, - 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79, - 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59, - 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95, - 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4, - 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74, - 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49, - 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e, - 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42, - 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c, - 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b, - 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca, - 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c, - 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69, - 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a, - 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65, - 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 
0x96, - 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4, - 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1, - 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85, - 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf, - 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55, - 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58, - 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6, - 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e, - 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb, - 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd, - 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20, - 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63, - 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b, - 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27, - 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61, - 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44, - 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd, - 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91, - 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3, - 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0, - 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4, - 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74, - 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41, - 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02, - 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1, - 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06, - 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe, - 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59, - 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde, - 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89, - 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0, - 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb, - 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62, - 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5, - 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90, - 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02, - 0x0f, 0x7c, 0x87, 
0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06, - 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91, - 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41, - 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37, - 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3, - 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71, - 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd, - 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100644 index 497b4d9a9..000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,551 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 [default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; 
- repeated Property raw_property = 15; - - optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional 
Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - 
- repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; - optional string database_id = 4; - - enum TransactionMode { - UNKNOWN = 0; - READ_ONLY = 1; - READ_WRITE = 2; - } - optional TransactionMode mode = 5 [default = UNKNOWN]; - - optional Transaction previous_transaction = 7; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index 9b4134e42..000000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "os" - - netcontext "golang.org/x/net/context" -) - -var ( - // This is set to true in identity_classic.go, which is behind the appengine build tag. - // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not - // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a - // first-gen runtime. See IsStandard below for the second-gen check. - appengineStandard bool - - // This is set to true in identity_flex.go, which is behind the appenginevm build tag. - appengineFlex bool -) - -// AppID is the implementation of the wrapper function of the same name in -// ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} - -// IsStandard is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsStandard() bool { - // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not - // second-gen (>= Go 1.11). - return appengineStandard || IsSecondGen() -} - -// IsStandard is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsSecondGen() bool { - // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. - return os.Getenv("GAE_ENV") == "standard" -} - -// IsFlex is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. 
-func IsFlex() bool { - return appengineFlex -} - -// IsAppEngine is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsAppEngine() bool { - return IsStandard() || IsFlex() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index 4e979f45e..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import ( - "appengine" - - netcontext "golang.org/x/net/context" -) - -func init() { - appengineStandard = true -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.DefaultVersionHostname(c) -} - -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func RequestID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.RequestID(c) -} - -func ModuleName(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.ModuleName(c) -} -func VersionID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.VersionID(c) -} - -func fullyQualifiedAppID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return c.FullyQualifiedAppID() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go deleted file mode 100644 index d5e2e7b5e..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 Google LLC. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appenginevm - -package internal - -func init() { - appengineFlex = true -} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index 5d8067263..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "log" - "net/http" - "os" - "strings" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. 
- -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - c := fromContext(ctx) - if c == nil { - return nil - } - return c.Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { - return dc - } - // If the header isn't set, read zone from the metadata service. - // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE] - zone, err := getMetadata("instance/zone") - if err != nil { - log.Printf("Datacenter: %v", err) - return "" - } - parts := strings.Split(string(zone), "/") - if len(parts) == 0 { - return "" - } - return parts[len(parts)-1] -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - if s := os.Getenv("GAE_ENV"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - if s := os.Getenv("GAE_SERVICE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - if s := os.Getenv("GAE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. - if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" { - return appID - } - if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" { - return project - } - return string(mustGetMetadata("instance/attributes/gae_project")) -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - if s := os.Getenv("GAE_APPLICATION"); s != "" { - return s - } - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 051ea3980..000000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError " - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. - Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. -// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 8545ac4ad..000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,1313 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google.golang.org/appengine/internal/log/log_service.proto - -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} -func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0} -} - -type LogServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} -func (*LogServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0} -} -func (m *LogServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogServiceError.Unmarshal(m, b) -} -func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic) -} -func (dst *LogServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogServiceError.Merge(dst, src) -} -func (m *LogServiceError) XXX_Size() int { - return xxx_messageInfo_LogServiceError.Size(m) -} -func (m *LogServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_LogServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_LogServiceError proto.InternalMessageInfo - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} -func (*UserAppLogLine) Descriptor() ([]byte, []int) { - 
return fileDescriptor_log_service_f054fd4b5012319d, []int{1} -} -func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b) -} -func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic) -} -func (dst *UserAppLogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogLine.Merge(dst, src) -} -func (m *UserAppLogLine) XXX_Size() int { - return xxx_messageInfo_UserAppLogLine.Size(m) -} -func (m *UserAppLogLine) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} -func (*UserAppLogGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{2} -} -func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b) -} -func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic) -} -func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogGroup.Merge(dst, src) -} -func (m *UserAppLogGroup) XXX_Size() int { - return xxx_messageInfo_UserAppLogGroup.Size(m) -} -func (m *UserAppLogGroup) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} -func (*FlushRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{3} -} -func (m *FlushRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FlushRequest.Unmarshal(m, b) -} -func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) -} -func (dst *FlushRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FlushRequest.Merge(dst, src) -} -func (m *FlushRequest) XXX_Size() int { - return xxx_messageInfo_FlushRequest.Size(m) -} -func (m *FlushRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FlushRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FlushRequest 
proto.InternalMessageInfo - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} -func (*SetStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{4} -} -func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b) -} -func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic) -} -func (dst *SetStatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetStatusRequest.Merge(dst, src) -} -func (m *SetStatusRequest) XXX_Size() int { - return xxx_messageInfo_SetStatusRequest.Size(m) -} -func (m *SetStatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetStatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} -func (*LogOffset) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{5} -} -func (m *LogOffset) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogOffset.Unmarshal(m, b) -} -func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic) -} -func (dst *LogOffset) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogOffset.Merge(dst, src) -} -func (m *LogOffset) XXX_Size() int { - return xxx_messageInfo_LogOffset.Size(m) -} -func (m *LogOffset) XXX_DiscardUnknown() { - xxx_messageInfo_LogOffset.DiscardUnknown(m) -} - -var xxx_messageInfo_LogOffset proto.InternalMessageInfo - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} -func (*LogLine) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{6} -} -func (m *LogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogLine.Unmarshal(m, b) -} -func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_LogLine.Marshal(b, m, deterministic) -} -func (dst *LogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogLine.Merge(dst, src) -} -func (m *LogLine) XXX_Size() int { - return xxx_messageInfo_LogLine.Size(m) -} -func (m *LogLine) XXX_DiscardUnknown() { - xxx_messageInfo_LogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_LogLine proto.InternalMessageInfo - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"` - PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" 
json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} -func (*RequestLog) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{7} -} -func (m *RequestLog) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RequestLog.Unmarshal(m, b) -} -func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic) -} -func (dst *RequestLog) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestLog.Merge(dst, src) -} -func (m *RequestLog) XXX_Size() int { - return xxx_messageInfo_RequestLog.Size(m) -} -func (m *RequestLog) XXX_DiscardUnknown() { - xxx_messageInfo_RequestLog.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestLog proto.InternalMessageInfo - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != nil { - return 
*m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil && m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m != nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type 
LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) ProtoMessage() {} -func (*LogModuleVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{8} -} -func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b) -} -func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic) -} -func (dst *LogModuleVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogModuleVersion.Merge(dst, src) -} -func (m *LogModuleVersion) XXX_Size() int { - return xxx_messageInfo_LogModuleVersion.Size(m) -} -func (m *LogModuleVersion) XXX_DiscardUnknown() { - xxx_messageInfo_LogModuleVersion.DiscardUnknown(m) -} - -var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"` - IncludeAll *bool 
`protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} -func (*LogReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{9} -} -func (m *LogReadRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadRequest.Unmarshal(m, b) -} -func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic) -} -func (dst *LogReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadRequest.Merge(dst, src) -} -func (m *LogReadRequest) XXX_Size() int { - return xxx_messageInfo_LogReadRequest.Size(m) -} -func (m *LogReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return "" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return *m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != 
nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} -func (*LogReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{10} -} -func (m *LogReadResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadResponse.Unmarshal(m, b) -} -func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic) -} -func (dst *LogReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadResponse.Merge(dst, src) -} -func (m *LogReadResponse) XXX_Size() int { - return xxx_messageInfo_LogReadResponse.Size(m) -} -func (m *LogReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} -func (*LogUsageRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{11} -} -func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b) -} -func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic) -} -func (dst *LogUsageRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRecord.Merge(dst, src) -} -func (m *LogUsageRecord) XXX_Size() int { - return 
xxx_messageInfo_LogUsageRecord.Size(m) -} -func (m *LogUsageRecord) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"` - UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} -func (*LogUsageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{12} -} -func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b) -} -func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic) -} -func (dst *LogUsageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRequest.Merge(dst, src) -} -func (m *LogUsageRequest) XXX_Size() int { - return xxx_messageInfo_LogUsageRequest.Size(m) -} -func (m *LogUsageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) 
GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) ProtoMessage() {} -func (*LogUsageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{13} -} -func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b) -} -func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic) -} -func (dst *LogUsageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageResponse.Merge(dst, src) -} -func (m *LogUsageResponse) XXX_Size() int { - return xxx_messageInfo_LogUsageResponse.Size(m) -} -func (m *LogUsageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { - proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError") - proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine") - proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup") - proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest") - proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest") - proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset") - proto.RegisterType((*LogLine)(nil), "appengine.LogLine") - proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog") - proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion") - proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest") - proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse") - proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord") - proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest") - proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d) -} - -var fileDescriptor_log_service_f054fd4b5012319d = []byte{ - // 1553 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6, - 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 
0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e, - 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40, - 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72, - 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d, - 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9, - 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32, - 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5, - 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3, - 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17, - 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f, - 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3, - 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46, - 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26, - 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31, - 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5, - 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd, - 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46, - 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe, - 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc, - 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1, - 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f, - 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39, - 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35, - 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd, - 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b, - 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c, - 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0, - 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36, - 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f, - 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a, - 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed, - 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95, - 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91, - 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87, - 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1, - 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5, - 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 
0x85, 0x26, 0xd4, 0x60, 0xbf, - 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce, - 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66, - 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0, - 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62, - 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c, - 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba, - 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a, - 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa, - 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4, - 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1, - 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62, - 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b, - 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4, - 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90, - 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78, - 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff, - 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56, - 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5, - 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b, - 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21, - 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69, - 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98, - 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51, - 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17, - 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c, - 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f, - 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f, - 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b, - 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10, - 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54, - 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d, - 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98, - 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0, - 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0, - 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61, - 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 
0x83, - 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3, - 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75, - 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38, - 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37, - 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e, - 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d, - 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65, - 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43, - 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06, - 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2, - 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea, - 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64, - 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1, - 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf, - 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36, - 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53, - 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7, - 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56, - 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b, - 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77, - 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05, - 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd, - 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00, - 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc475..000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; -} - -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - 
required int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go deleted file mode 100644 index 1e765312f..000000000 --- a/vendor/google.golang.org/appengine/internal/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "appengine_internal" -) - -func Main() { - MainPath = "" - appengine_internal.Main() -} diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go deleted file mode 100644 index 357dce4dd..000000000 --- a/vendor/google.golang.org/appengine/internal/main_common.go +++ /dev/null @@ -1,7 +0,0 @@ -package internal - -// MainPath stores the file path of the main package. On App Engine Standard -// using Go version 1.9 and below, this will be unset. On App Engine Flex and -// App Engine Standard second-gen (Go 1.11 and above), this will be the -// filepath to package main. -var MainPath string diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go deleted file mode 100644 index ddb79a333..000000000 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "io" - "log" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" -) - -func Main() { - MainPath = filepath.Dir(findMainPath()) - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - host := "" - if IsDevAppServer() { - host = "127.0.0.1" - } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -// Find the path to package main by looking at the root Caller. -func findMainPath() string { - pc := make([]uintptr, 100) - n := runtime.Callers(2, pc) - frames := runtime.CallersFrames(pc[:n]) - for { - frame, more := frames.Next() - // Tests won't have package main, instead they have testing.tRunner - if frame.Function == "main.main" || frame.Function == "testing.tRunner" { - return frame.File - } - if !more { - break - } - } - return "" -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index c4ba63bb4..000000000 --- a/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? 
-func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err)) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index fe429720e..000000000 --- a/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. - conn, err := net.DialTimeout(network, addr, 10*time.Second) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100644 index 2fdb546a6..000000000 --- a/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. - -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. 
- # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 8d782a38e..000000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto - -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} -func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0} -} - -type Request struct { - ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte 
`protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Request.Unmarshal(m, b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) -} -func (dst *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(dst, src) -} -func (m *Request) XXX_Size() int { - return xxx_messageInfo_Request.Size(m) -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func (m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} -func (*ApplicationError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{1} -} -func (m *ApplicationError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplicationError.Unmarshal(m, b) -} -func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic) -} -func (dst *ApplicationError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplicationError.Merge(dst, src) -} -func (m *ApplicationError) XXX_Size() int { - return xxx_messageInfo_ApplicationError.Size(m) -} -func (m *ApplicationError) XXX_DiscardUnknown() { - xxx_messageInfo_ApplicationError.DiscardUnknown(m) -} - -var xxx_messageInfo_ApplicationError proto.InternalMessageInfo - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) 
ProtoMessage() {} -func (*RpcError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2} -} -func (m *RpcError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RpcError.Unmarshal(m, b) -} -func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RpcError.Marshal(b, m, deterministic) -} -func (dst *RpcError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RpcError.Merge(dst, src) -} -func (m *RpcError) XXX_Size() int { - return xxx_messageInfo_RpcError.Size(m) -} -func (m *RpcError) XXX_DiscardUnknown() { - xxx_messageInfo_RpcError.DiscardUnknown(m) -} - -var xxx_messageInfo_RpcError proto.InternalMessageInfo - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{3} -} -func (m *Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Response.Unmarshal(m, b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) -} -func (dst *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(dst, src) -} -func (m *Response) XXX_Size() int { - return xxx_messageInfo_Response.Size(m) -} -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Response proto.InternalMessageInfo - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { - proto.RegisterType((*Request)(nil), "remote_api.Request") - proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError") - proto.RegisterType((*RpcError)(nil), "remote_api.RpcError") - proto.RegisterType((*Response)(nil), "remote_api.Response") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d) -} - -var fileDescriptor_remote_api_1978114ec33a273d = []byte{ - // 531 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42, - 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e, - 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c, - 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2, - 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa, - 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a, - 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98, - 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6, - 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca, - 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60, - 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9, - 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a, - 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba, - 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6, - 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86, - 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf, - 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21, - 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f, - 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53, - 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2, - 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f, - 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3, - 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0, - 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef, - 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64, - 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b, - 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5, - 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c, - 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf, - 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7, - 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e, - 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f, - 0x03, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a4e..000000000 --- 
a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 9006ae653..000000000 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. -func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx netcontext.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { - if transactionFromContext(c) != nil { - return nil, errors.New("nested transactions are not supported") - } - - // Begin the transaction. 
- t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if previousTransaction != nil { - req.PreviousTransaction = previousTransaction - } - if readOnly { - req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum() - } else { - req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum() - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return nil, err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return &t.transaction, err - } - t.finished = true - - // Commit the transaction. - res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." { - return &t.transaction, ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return &t.transaction, ErrConcurrentTransaction - } - } - return &t.transaction, err -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go deleted file mode 100644 index 5f727750a..000000000 --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go +++ /dev/null @@ -1,527 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto - -package urlfetch - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type URLFetchServiceError_ErrorCode int32 - -const ( - URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 - URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 - URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 - URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 - URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 - URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 - URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 - URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 - URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 - URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 - URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 - URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 - URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 -) - -var URLFetchServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_URL", - 2: "FETCH_ERROR", - 3: "UNSPECIFIED_ERROR", - 4: "RESPONSE_TOO_LARGE", - 5: "DEADLINE_EXCEEDED", - 6: "SSL_CERTIFICATE_ERROR", - 7: "DNS_ERROR", - 8: "CLOSED", - 9: "INTERNAL_TRANSIENT_ERROR", - 10: "TOO_MANY_REDIRECTS", - 11: "MALFORMED_REPLY", - 12: "CONNECTION_ERROR", -} -var URLFetchServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_URL": 1, - "FETCH_ERROR": 2, - "UNSPECIFIED_ERROR": 3, - "RESPONSE_TOO_LARGE": 4, - "DEADLINE_EXCEEDED": 5, - "SSL_CERTIFICATE_ERROR": 6, - "DNS_ERROR": 7, - "CLOSED": 8, - "INTERNAL_TRANSIENT_ERROR": 9, - "TOO_MANY_REDIRECTS": 10, - "MALFORMED_REPLY": 11, - "CONNECTION_ERROR": 12, -} - -func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { - p := new(URLFetchServiceError_ErrorCode) - *p = x - return p -} -func (x URLFetchServiceError_ErrorCode) String() string { - return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) -} -func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") - if err != nil { - return err - } - *x = URLFetchServiceError_ErrorCode(value) - return nil -} -func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0} -} - -type URLFetchRequest_RequestMethod int32 - -const ( - URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 - URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 - URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 - URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 - URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 - URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 -) - -var URLFetchRequest_RequestMethod_name = map[int32]string{ - 1: "GET", - 2: "POST", - 3: "HEAD", - 4: "PUT", - 5: "DELETE", - 6: "PATCH", -} -var URLFetchRequest_RequestMethod_value = map[string]int32{ - "GET": 1, - "POST": 2, - "HEAD": 3, - "PUT": 4, - "DELETE": 5, - "PATCH": 6, -} - -func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { - p := new(URLFetchRequest_RequestMethod) - *p = x - return p -} -func (x URLFetchRequest_RequestMethod) String() string { - return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) -} -func (x 
*URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") - if err != nil { - return err - } - *x = URLFetchRequest_RequestMethod(value) - return nil -} -func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} -} - -type URLFetchServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } -func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } -func (*URLFetchServiceError) ProtoMessage() {} -func (*URLFetchServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0} -} -func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b) -} -func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic) -} -func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchServiceError.Merge(dst, src) -} -func (m *URLFetchServiceError) XXX_Size() int { - return xxx_messageInfo_URLFetchServiceError.Size(m) -} -func (m *URLFetchServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo - -type URLFetchRequest struct { - Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` - Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` - Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` - Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` - FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` - Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` - MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } -func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest) ProtoMessage() {} -func (*URLFetchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1} -} -func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b) -} -func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic) -} -func (dst *URLFetchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchRequest.Merge(dst, src) -} -func (m *URLFetchRequest) XXX_Size() int { - return xxx_messageInfo_URLFetchRequest.Size(m) -} -func (m *URLFetchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo - -const 
Default_URLFetchRequest_FollowRedirects bool = true -const Default_URLFetchRequest_MustValidateServerCertificate bool = true - -func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { - if m != nil && m.Method != nil { - return *m.Method - } - return URLFetchRequest_GET -} - -func (m *URLFetchRequest) GetUrl() string { - if m != nil && m.Url != nil { - return *m.Url - } - return "" -} - -func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchRequest) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func (m *URLFetchRequest) GetFollowRedirects() bool { - if m != nil && m.FollowRedirects != nil { - return *m.FollowRedirects - } - return Default_URLFetchRequest_FollowRedirects -} - -func (m *URLFetchRequest) GetDeadline() float64 { - if m != nil && m.Deadline != nil { - return *m.Deadline - } - return 0 -} - -func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { - if m != nil && m.MustValidateServerCertificate != nil { - return *m.MustValidateServerCertificate - } - return Default_URLFetchRequest_MustValidateServerCertificate -} - -type URLFetchRequest_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } -func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest_Header) ProtoMessage() {} -func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} -} -func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b) -} -func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic) -} -func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src) -} -func (m *URLFetchRequest_Header) XXX_Size() int { - return xxx_messageInfo_URLFetchRequest_Header.Size(m) -} -func (m *URLFetchRequest_Header) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo - -func (m *URLFetchRequest_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchRequest_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type URLFetchResponse struct { - Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` - StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` - Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` - ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` - ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` - ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` - FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" 
json:"FinalUrl,omitempty"` - ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` - ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` - ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } -func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse) ProtoMessage() {} -func (*URLFetchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2} -} -func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b) -} -func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic) -} -func (dst *URLFetchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchResponse.Merge(dst, src) -} -func (m *URLFetchResponse) XXX_Size() int { - return xxx_messageInfo_URLFetchResponse.Size(m) -} -func (m *URLFetchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo - -const Default_URLFetchResponse_ContentWasTruncated bool = false -const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 -const Default_URLFetchResponse_ApiBytesSent int64 = 0 -const Default_URLFetchResponse_ApiBytesReceived int64 = 0 - -func (m *URLFetchResponse) GetContent() []byte { - if m != nil { - return m.Content - } - return nil -} - -func (m *URLFetchResponse) GetStatusCode() int32 { - if m != nil && m.StatusCode != nil { - return *m.StatusCode - } - return 0 -} - -func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchResponse) GetContentWasTruncated() bool { - if m != nil && m.ContentWasTruncated != nil { - return *m.ContentWasTruncated - } - return Default_URLFetchResponse_ContentWasTruncated -} - -func (m *URLFetchResponse) GetExternalBytesSent() int64 { - if m != nil && m.ExternalBytesSent != nil { - return *m.ExternalBytesSent - } - return 0 -} - -func (m *URLFetchResponse) GetExternalBytesReceived() int64 { - if m != nil && m.ExternalBytesReceived != nil { - return *m.ExternalBytesReceived - } - return 0 -} - -func (m *URLFetchResponse) GetFinalUrl() string { - if m != nil && m.FinalUrl != nil { - return *m.FinalUrl - } - return "" -} - -func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { - if m != nil && m.ApiCpuMilliseconds != nil { - return *m.ApiCpuMilliseconds - } - return Default_URLFetchResponse_ApiCpuMilliseconds -} - -func (m *URLFetchResponse) GetApiBytesSent() int64 { - if m != nil && m.ApiBytesSent != nil { - return *m.ApiBytesSent - } - return Default_URLFetchResponse_ApiBytesSent -} - -func (m *URLFetchResponse) GetApiBytesReceived() int64 { - if m != nil && m.ApiBytesReceived != nil { - return *m.ApiBytesReceived - } - return Default_URLFetchResponse_ApiBytesReceived -} - -type URLFetchResponse_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized 
[]byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } -func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse_Header) ProtoMessage() {} -func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) { - return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0} -} -func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b) -} -func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic) -} -func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src) -} -func (m *URLFetchResponse_Header) XXX_Size() int { - return xxx_messageInfo_URLFetchResponse_Header.Size(m) -} -func (m *URLFetchResponse_Header) XXX_DiscardUnknown() { - xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo - -func (m *URLFetchResponse_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchResponse_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -func init() { - proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError") - proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest") - proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header") - proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse") - proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced) -} - -var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{ - // 770 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54, - 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29, - 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e, - 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d, - 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b, - 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27, - 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92, - 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7, - 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17, - 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec, - 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c, - 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a, - 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01, - 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14, - 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 
0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f, - 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07, - 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87, - 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a, - 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a, - 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37, - 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc, - 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde, - 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71, - 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17, - 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea, - 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4, - 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6, - 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96, - 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d, - 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d, - 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb, - 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad, - 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86, - 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20, - 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e, - 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f, - 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21, - 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c, - 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b, - 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6, - 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02, - 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b, - 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9, - 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e, - 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97, - 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3, - 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8, - 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05, - 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto deleted file mode 100644 index f695edf6a..000000000 --- 
a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "urlfetch"; - -package appengine; - -message URLFetchServiceError { - enum ErrorCode { - OK = 0; - INVALID_URL = 1; - FETCH_ERROR = 2; - UNSPECIFIED_ERROR = 3; - RESPONSE_TOO_LARGE = 4; - DEADLINE_EXCEEDED = 5; - SSL_CERTIFICATE_ERROR = 6; - DNS_ERROR = 7; - CLOSED = 8; - INTERNAL_TRANSIENT_ERROR = 9; - TOO_MANY_REDIRECTS = 10; - MALFORMED_REPLY = 11; - CONNECTION_ERROR = 12; - } -} - -message URLFetchRequest { - enum RequestMethod { - GET = 1; - POST = 2; - HEAD = 3; - PUT = 4; - DELETE = 5; - PATCH = 6; - } - required RequestMethod Method = 1; - required string Url = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bytes Payload = 6 [ctype=CORD]; - - optional bool FollowRedirects = 7 [default=true]; - - optional double Deadline = 8; - - optional bool MustValidateServerCertificate = 9 [default=true]; -} - -message URLFetchResponse { - optional bytes Content = 1; - required int32 StatusCode = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bool ContentWasTruncated = 6 [default=false]; - optional int64 ExternalBytesSent = 7; - optional int64 ExternalBytesReceived = 8; - - optional string FinalUrl = 9; - - optional int64 ApiCpuMilliseconds = 10 [default=0]; - optional int64 ApiBytesSent = 11 [default=0]; - optional int64 ApiBytesReceived = 12 [default=0]; -} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go deleted file mode 100644 index 6ffe1e6d9..000000000 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package urlfetch provides an http.RoundTripper implementation -// for fetching URLs via App Engine's urlfetch service. -package urlfetch // import "google.golang.org/appengine/urlfetch" - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/urlfetch" -) - -// Transport is an implementation of http.RoundTripper for -// App Engine. Users should generally create an http.Client using -// this transport and use the Client rather than using this transport -// directly. -type Transport struct { - Context context.Context - - // Controls whether the application checks the validity of SSL certificates - // over HTTPS connections. A value of false (the default) instructs the - // application to send a request to the server only if the certificate is - // valid and signed by a trusted certificate authority (CA), and also - // includes a hostname that matches the certificate. A value of true - // instructs the application to perform no certificate validation. - AllowInvalidServerCertificate bool -} - -// Verify statically that *Transport implements http.RoundTripper. -var _ http.RoundTripper = (*Transport)(nil) - -// Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. 
-// -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. -func Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &Transport{ - Context: ctx, - }, - } -} - -type bodyReader struct { - content []byte - truncated bool - closed bool -} - -// ErrTruncatedBody is the error returned after the final Read() from a -// response's Body if the body has been truncated by App Engine's proxy. -var ErrTruncatedBody = errors.New("urlfetch: truncated body") - -func statusCodeToText(code int) string { - if t := http.StatusText(code); t != "" { - return t - } - return strconv.Itoa(code) -} - -func (br *bodyReader) Read(p []byte) (n int, err error) { - if br.closed { - if br.truncated { - return 0, ErrTruncatedBody - } - return 0, io.EOF - } - n = copy(p, br.content) - if n > 0 { - br.content = br.content[n:] - return - } - if br.truncated { - br.closed = true - return 0, ErrTruncatedBody - } - return 0, io.EOF -} - -func (br *bodyReader) Close() error { - br.closed = true - br.content = nil - return nil -} - -// A map of the URL Fetch-accepted methods that take a request body. -var methodAcceptsRequestBody = map[string]bool{ - "POST": true, - "PUT": true, - "PATCH": true, -} - -// urlString returns a valid string given a URL. This function is necessary because -// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. -// See http://code.google.com/p/go/issues/detail?id=4860. -func urlString(u *url.URL) string { - if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { - return u.String() - } - aux := *u - aux.Opaque = "//" + aux.Host + aux.Opaque - return aux.String() -} - -// RoundTrip issues a single HTTP request and returns its response. Per the -// http.RoundTripper interface, RoundTrip only returns an error if there -// was an unsupported request or the URL Fetch proxy fails. -// Note that HTTP response codes such as 5xx, 403, 404, etc are not -// errors as far as the transport is concerned and will be returned -// with err set to nil. -func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { - methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] - if !ok { - return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) - } - - method := pb.URLFetchRequest_RequestMethod(methNum) - - freq := &pb.URLFetchRequest{ - Method: &method, - Url: proto.String(urlString(req.URL)), - FollowRedirects: proto.Bool(false), // http.Client's responsibility - MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), - } - if deadline, ok := t.Context.Deadline(); ok { - freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) - } - - for k, vals := range req.Header { - for _, val := range vals { - freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ - Key: proto.String(k), - Value: proto.String(val), - }) - } - } - if methodAcceptsRequestBody[req.Method] && req.Body != nil { - // Avoid a []byte copy if req.Body has a Bytes method. 
- switch b := req.Body.(type) { - case interface { - Bytes() []byte - }: - freq.Payload = b.Bytes() - default: - freq.Payload, err = ioutil.ReadAll(req.Body) - if err != nil { - return nil, err - } - } - } - - fres := &pb.URLFetchResponse{} - if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { - return nil, err - } - - res = &http.Response{} - res.StatusCode = int(*fres.StatusCode) - res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) - res.Header = make(http.Header) - res.Request = req - - // Faked: - res.ProtoMajor = 1 - res.ProtoMinor = 1 - res.Proto = "HTTP/1.1" - res.Close = true - - for _, h := range fres.Header { - hkey := http.CanonicalHeaderKey(*h.Key) - hval := *h.Value - if hkey == "Content-Length" { - // Will get filled in below for all but HEAD requests. - if req.Method == "HEAD" { - res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) - } - continue - } - res.Header.Add(hkey, hval) - } - - if req.Method != "HEAD" { - res.ContentLength = int64(len(fres.Content)) - } - - truncated := fres.GetContentWasTruncated() - res.Body = &bodyReader{content: fres.Content, truncated: truncated} - return -} - -func init() { - internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) - internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) -} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112bc..24bc98ac4 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 95967e811..1f57e6610 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,15 +27,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. +// options. 
Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal writes the given [proto.Message] in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index a45625c8d..87e46bd4d 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu {rv.MethodByName("Values"), "Values"}, {rv.MethodByName("ReservedNames"), "ReservedNames"}, {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, }...) case protoreflect.EnumValueDescriptor: diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 18f075687..ff6a38360 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 000000000..029a6a12d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. 
+package editionssupport + +import descriptorpb "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d20837..7e87c7604 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e786..099b2bf45 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35e..c2d6bd526 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 8826bcf40..df53ff40b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,6 +7,7 @@ package filedesc import ( "bytes" "fmt" + "strings" "sync" "sync/atomic" @@ -108,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -202,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -251,10 +258,6 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -331,8 +334,7 @@ func (fd *Field) HasPresence() bool { if fd.L1.Cardinality == protoreflect.Repeated { return false } - explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence - return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + 
return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional @@ -345,14 +347,7 @@ func (fd *Field) IsPacked() bool { case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: return false } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsPacked - } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { - // proto3 repeated fields are packed by default. - return !fd.L1.HasPacked || fd.L1.IsPacked - } - return fd.L1.IsPacked + return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } @@ -388,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} @@ -399,13 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. func (fd *Field) EnforceUTF8() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsUTF8Validated - } - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + return fd.L1.EditionFeatures.IsUTF8Validated } func (od *Oneof) IsSynthetic() bool { @@ -438,7 +431,6 @@ type ( Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -461,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } @@ -542,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. 
var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} ) type ( @@ -585,6 +587,34 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. + if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -605,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. s.nameText = string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { + if isGroupLike(fd) { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 237e64fd2..8a57d60b0 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 case "editions": fd.L1.Syntax = protoreflect.Editions default: @@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. 
if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 } - if fd.L1.Syntax == protoreflect.Editions { - fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) - } + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) // Parse editions features from options if any if options != nil { @@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -467,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -499,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 482a61cc1..e56c91a8d 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. 
@@ -466,10 +471,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } - if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { fd.L1.Kind = protoreflect.GroupKind } - if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + if fd.L1.EditionFeatures.IsLegacyRequired { fd.L1.Cardinality = protoreflect.Required } if rawTypeName != nil { @@ -496,13 +501,11 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -548,7 +551,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte - xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -572,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -580,12 +581,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } - if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { - xd.L1.Kind = protoreflect.GroupKind - } - if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { - xd.L1.Cardinality = protoreflect.Required - } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -598,32 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldOptions_Features_field_number: - xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 30db19fdc..f4107c05f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,6 +8,7 @@ 
package filedesc import ( "fmt" + "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) + if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 0375a49d4..11f5f356b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -14,9 +14,13 @@ import ( ) var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} func init() { unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) } func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { @@ -104,12 +108,15 @@ func unmarshalEditionDefault(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: fs = unmarshalFeatureSet(v, fs) } } } defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) } func unmarshalEditionDefaults(b []byte) { @@ -135,8 +142,15 @@ func unmarshalEditionDefaults(b []byte) { } func getFeaturesFor(ed Edition) EditionFeatures { - if def, ok := defaultsCache[ed]; ok { - return def + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } + match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) } - panic(fmt.Sprintf("unsupported edition: %v", ed)) + return defaultsCache[match] } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index 28240ebc5..bfb3b8417 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4ef..ba83fea44 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ 
b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 40272c893..f30ab6b58 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -21,6 +21,7 @@ const ( // Enum values for google.protobuf.Edition. const ( Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 Edition_EDITION_PROTO2_enum_value = 998 Edition_EDITION_PROTO3_enum_value = 999 Edition_EDITION_2023_enum_value = 1000 @@ -653,6 +654,7 @@ const ( FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -667,6 +669,7 @@ const ( FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -684,6 +687,7 @@ const ( FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -767,6 +771,33 @@ const ( FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. 
+const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -829,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -842,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -1110,17 +1144,20 @@ const ( // Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. 
const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" ) // Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 ) // Names for google.protobuf.SourceCodeInfo. diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index fd9015e8e..9a652a2b4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -10,7 +10,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" ) -const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" // Names for google.protobuf.GoFeatures. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98de..5d5771c2e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. 
@@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041edc..f29e6a8fa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 2b8f122c2..4bb0a7a20 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a look first, then only take the lock if needed. 
+ if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 3fadd241e..78ee47e44 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return 
nil } -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) + before := len(b) + b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } @@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } @@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 111b9d16f..fb35f0bae 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,6 +9,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -240,11 +241,16 @@ func 
appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = protowire.AppendVarint(b, uint64(size)) + before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf1..7a16ec13d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2efa..e06ece55a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. 
type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f89136516..18cb96fd7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a057..304244a65 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6e..febd21224 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. 
+func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0bae..e31249f64 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. 
// @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c2a803bb2..81b2b1a76 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
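The lazy-extension changes threaded through the hunks above (lazyBuffer in codec_extension.go, fullyLazyExtensions plus the size/append paths in encode.go and codec_messageset.go) share one idea: if an extension field is still sitting in wire format and deterministic output was not requested, its original bytes are reused verbatim instead of being decoded and re-encoded. The standalone sketch below illustrates only that decision; lazyExt, marshalExt, and reencode are hypothetical names for this illustration, not identifiers from the vendored package.

package main

import "fmt"

// lazyExt is a stand-in for the vendored ExtensionField: wire holds the
// still-encoded bytes, expanded records whether the value was decoded.
type lazyExt struct {
	wire     []byte
	expanded bool
}

// marshalExt sketches the fast path: append the stored wire bytes verbatim
// when the extension is still lazy and deterministic output is not required;
// otherwise fall back to a full re-encode (supplied by the caller here).
func marshalExt(b []byte, x lazyExt, deterministic bool, reencode func([]byte) []byte) []byte {
	if !deterministic && !x.expanded && x.wire != nil {
		return append(b, x.wire...) // bytes already carry tag + length + payload
	}
	return reencode(b) // deterministic output (or an expanded value) forces re-encoding
}

func main() {
	x := lazyExt{wire: []byte{0x0a, 0x03, 'a', 'b', 'c'}} // field 1, length-delimited "abc"
	out := marshalExt(nil, x, false, func(b []byte) []byte { return b })
	fmt.Printf("lazy pass-through: % x\n", out)
}

Skipping the pass-through when MarshalDeterministic is set mirrors the comment in the encode.go hunk: lazily retained bytes may not match what Go Protobuf itself would emit, so deterministic marshaling has to expand and re-encode the extension.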
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 87b30d050..6e8677ee6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked + xd.L1.EditionFeatures = fd.L1.EditionFeatures xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index 9ab091086..b649f1124 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io/ioutil" + "io" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := ioutil.ReadAll(zr) + b2, err := io.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 2ab2c6297..bf0b6049b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
var oneofWrappers []reflect.Type methods := make([]reflect.Method, 0, 2) @@ -215,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -307,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.HasPacked { + if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } return opts.Interface() } @@ -344,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { @@ -563,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdce..019399d45 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). 
@@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index d9ea010be..ecb4623d7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } } } -func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if m == nil { return false } - xd := xt.TypeDescriptor() x, ok := (*m)[int32(xd.Number())] if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. 
+ return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 - case xd.Message() != nil: - return x.Value().Message().IsValid() } return true } -func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { - delete(*m, int32(xt.TypeDescriptor().Number())) +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { + delete(*m, int32(xd.Number())) } -func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } } - return xt.Zero() + return xd.Type().Zero() } -func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { - xd := xt.TypeDescriptor() +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { + xt := xd.Type() isValid := true switch { case !xt.IsValidValue(v): @@ -292,7 +292,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) isValid = v.Message().IsValid() } if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) } if *m == nil { @@ -302,16 +302,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } - v := xt.New() - m.Set(xt, v) + v := xd.Type().New() + m.Set(xd, v) return v } @@ -394,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -422,13 +421,13 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -457,7 +456,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd.Type() + return nil, xtd } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 741d6e5b6..99dc23c6f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v 
protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + 
if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e94434..da685e8a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. 
-type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e311..5f20ca5d8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e5..a1f09162d 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a50fcfb49..dbbf1f686 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 33 - Patch = 0 + Minor = 34 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index e5b03b567..d75a6534c 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,6 +51,8 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. 
func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 4fed202f9..1f847bcc3 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -70,7 +75,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. +// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 17899a3a7..d248f2928 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,18 +11,21 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. 
func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special-case, we reports invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent @@ -36,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -48,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -75,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. 
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45c..575d14831 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49b..052fb5ae3 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,6 +34,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index baa0cc621..8fbecb4f5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,7 @@ package protodesc import ( + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -91,15 +92,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 case "editions": f.L1.Syntax = protoreflect.Editions f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) } f.L1.Path = fd.GetName() @@ -114,9 +117,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } - if f.L1.Syntax == 
protoreflect.Editions { - initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) - } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -219,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index b3278163c..856175542 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -69,9 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } - if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) - } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -146,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -163,32 +163,12 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc f.L1.StringName.InitJSON(fd.GetJsonName()) } - if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { - f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) - - if f.L1.EditionFeatures.IsLegacyRequired { - f.L1.Cardinality = protoreflect.Required - } - // We reuse the existing field because the old option `[packed = - // true]` is mutually exclusive with the editions feature. - if canBePacked(fd) { - f.L1.HasPacked = true - f.L1.IsPacked = f.L1.EditionFeatures.IsPacked - } - - // We pretend this option is always explicitly set because the only - // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 - // or to return the appropriate default. 
- // When using editions we either parse the option or resolve the - // appropriate default here (instead of later when this option is - // requested from the descriptor). - // In proto2/proto3 syntax HasEnforceUTF8 might be false. - f.L1.HasEnforceUTF8 = true - f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } - if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { - f.L1.Kind = protoreflect.GroupKind - } + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind } } return fs, nil @@ -201,12 +181,10 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } - if parent.Syntax() == protoreflect.Editions { - o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures()) - } } } return os, nil @@ -220,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca5854..f3cebab29 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) 
+ f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index e4dcaf876..6de31c2eb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if e.Syntax() == protoreflect.Proto3 { + if !e.IsClosed() { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in proto3 do not conflict if the + // Verify that value names in open enums do not conflict if the // case-insensitive prefix is removed. // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) for i, md := range mds { m := &ms[i] @@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if m.Syntax() == protoreflect.Proto3 { + if isProto3 { if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. 
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { @@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { + if !isProto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -162,26 +152,29 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if f.IsWeak() && !flags.ProtoLegacy { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { return errors.New("message field %q may only be weak for an optional message", f.FullName()) } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(f); err != nil { + if err := checkValidGroup(file, f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if f.Syntax() == protoreflect.Proto3 { + if isProto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) } } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -215,17 +208,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(xs 
[]filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -267,13 +260,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(x); err != nil { + if err := checkValidGroup(f, x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if x.Syntax() == protoreflect.Proto3 { + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -309,21 +302,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. -func checkValidGroup(fd protoreflect.FieldDescriptor) error { +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() == protoreflect.Proto3: + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 2a6b29d17..804830eda 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -17,11 +17,6 @@ import ( gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" ) -const ( - SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 - SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 -) - var defaults = &descriptorpb.FeatureSetDefaults{} var defaultsCacheMu sync.Mutex var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) @@ -67,18 +62,20 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { fmt.Fprintf(os.Stderr, "internal error: 
unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) os.Exit(1) } - fs := defaults.GetDefaults()[0].GetFeatures() + fsed := defaults.GetDefaults()[0] // Using a linear search for now. // Editions are guaranteed to be sorted and thus we could use a binary search. // Given that there are only a handful of editions (with one more per year) // there is not much reason to use a binary search. for _, def := range defaults.GetDefaults() { if def.GetEdition() <= edpb { - fs = def.GetFeatures() + fsed = def } else { break } } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) defaultsCache[ed] = fs return fs } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index 9d6e05420..a5de8d400 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -73,6 +73,16 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } return p } @@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } + if field.Syntax() == protoreflect.Editions { + // Editions have no group keyword, this type is only set so that downstream users continue + // treating this as delimited encoding. + if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 00b01fbd8..c85bfaa5b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -161,7 +161,7 @@ const ( // IsValid reports whether the syntax is valid. 
func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 7dcc2ff09..ea154eec4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -373,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) case 21: b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -483,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -519,6 +523,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { return b } +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 60ff62b4c..cd8fadbaf 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and @@ -544,6 +544,12 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. 
+ // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f4..75f83a2af 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 160309731..9fe83cef5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. 
+func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e8..7f3583ead 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 435470111..f7d386990 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -15,7 +15,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +37,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +70,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +81,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52a..de1777339 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
- descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 78624cf60..9403eb075 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -54,6 +54,9 @@ type Edition int32 const ( // A placeholder for an unknown edition value. Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 // Legacy syntax "editions". These pre-date editions, but behave much like // distinct editions. These can't be used to specify the edition of proto // files, but feature definitions must supply proto2/proto3 defaults for @@ -82,6 +85,7 @@ const ( var ( Edition_name = map[int32]string{ 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", 998: "EDITION_PROTO2", 999: "EDITION_PROTO3", 1000: "EDITION_2023", @@ -95,6 +99,7 @@ var ( } Edition_value = map[string]int32{ "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, "EDITION_PROTO2": 998, "EDITION_PROTO3": 999, "EDITION_2023": 1000, @@ -2177,12 +2182,16 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -2679,7 +2688,8 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2811,6 +2821,13 @@ func (x *FieldOptions) GetFeatures() *FeatureSet { return nil } +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2995,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3058,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -3968,6 +3994,88 @@ func (x *FieldOptions_EditionDefault) GetValue() string { return "" } +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. + EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. + DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. + EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3985,7 +4093,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3998,7 +4106,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4037,14 +4145,17 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. + OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. 
+ FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4057,7 +4168,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4080,9 +4191,16 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { return Edition_EDITION_UNKNOWN } -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { if x != nil { - return x.Features + return x.OverridableFeatures + } + return nil +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures } return nil } @@ -4188,7 +4306,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4201,7 +4319,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4275,7 +4393,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4288,7 +4406,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4597,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 
0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, @@ -4670,405 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 
0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 
0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 
0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 
0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 
0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 
0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 
0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, - 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 
0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 
0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 
0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, - 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, - 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, - 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, - 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, - 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 
0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, - 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, - 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, - 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, - 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 
0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, - 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, - 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, - 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07, - 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, - 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, - 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, - 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 
0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, - 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, - 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, - 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, - 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, - 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 
0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 
0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, - 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, - 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, - 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 
0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, - 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, - 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, - 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, - 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, - 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 
0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 
0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5084,8 +5242,8 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { } var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5131,10 +5289,11 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation + (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto @@ -5179,40 +5338,46 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 49: google.protobuf.ServiceOptions.features:type_name -> 
google.protobuf.FeatureSet - 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet - 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 71, // [71:71] is the sub-list for method output_type - 71, // [71:71] is the sub-list for method input_type - 71, // [71:71] is the sub-list for extension type_name - 71, // [71:71] is the sub-list for extension extendee - 0, // [0:71] is the sub-list for field type_name + 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: 
google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5221,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v 
interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5233,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5245,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5257,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5271,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5283,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5295,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5307,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5319,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5331,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5343,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + 
file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5357,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5371,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5385,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5399,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5413,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -5427,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5441,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5455,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5467,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5481,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := 
v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5493,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5505,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5517,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5529,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5541,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -5553,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5565,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5577,7 +5742,19 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_FeatureSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5589,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ 
-5601,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5613,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -5632,7 +5809,7 @@ func file_google_protobuf_descriptor_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, NumEnums: 17, - NumMessages: 32, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index 25de5ae00..a2ca940c5 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -6,9 +6,9 @@ // https://developers.google.com/open-source/licenses/bsd // Code generated by protoc-gen-go. DO NOT EDIT. -// source: reflect/protodesc/proto/go_features.proto +// source: google/protobuf/go_features.proto -package proto +package gofeaturespb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -30,7 +30,7 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + mi := &file_google_protobuf_go_features_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -43,7 +43,7 @@ func (x *GoFeatures) String() string { func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { - mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0] + mi := &file_google_protobuf_go_features_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -56,7 +56,7 @@ func (x *GoFeatures) ProtoReflect() protoreflect.Message { // Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. func (*GoFeatures) Descriptor() ([]byte, []int) { - return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0} + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} } func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { @@ -66,69 +66,73 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } -var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), ExtensionType: (*GoFeatures)(nil), Field: 1002, - Name: "google.protobuf.go", + Name: "pb.go", Tag: "bytes,1002,opt,name=go", - Filename: "reflect/protodesc/proto/go_features.proto", + Filename: "google/protobuf/go_features.proto", }, } // Extension fields to descriptorpb.FeatureSet. 
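// Illustrative sketch, not part of the vendored diff: the descriptor.pb.go hunks above
// make two kinds of changes. A new FieldOptions_FeatureSupport message is added at
// index 28, shifting the later message indices and bumping NumMessages from 32 to 33,
// and the generated exporters switch from `func(v interface{}, i int) interface{}` to
// `func(v any, i int) any`. Since Go 1.18, `any` is a built-in alias for `interface{}`,
// so the signature change is purely cosmetic, as the standalone snippet below shows.
package main

import "fmt"

// Both variables have the identical type func(any, int) any; the compiler treats
// `any` and `interface{}` as the same type.
var oldStyle = func(v interface{}, i int) interface{} { return v }
var newStyle = func(v any, i int) any { return v }

func main() {
	fmt.Println(oldStyle("msg", 0) == newStyle("msg", 0)) // true
}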
var ( - // optional google.protobuf.GoFeatures go = 1002; - E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0] + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] ) -var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor - -var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64, - 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, - 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, - 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, - 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f, - 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 
0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( - file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once - file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc ) -func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte { - file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() { - file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData) +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) }) - return file_reflect_protodesc_proto_go_features_proto_rawDescData + return file_google_protobuf_go_features_proto_rawDescData } -var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{ - (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []any{ + (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } -var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{ - 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet - 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet + 0, // 1: pb.go:type_name -> pb.GoFeatures 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 1, // [1:2] is the sub-list for extension type_name @@ -136,13 +140,13 @@ var file_reflect_protodesc_proto_go_features_proto_depIdxs = 
[]int32{ 0, // [0:0] is the sub-list for field type_name } -func init() { file_reflect_protodesc_proto_go_features_proto_init() } -func file_reflect_protodesc_proto_go_features_proto_init() { - if File_reflect_protodesc_proto_go_features_proto != nil { +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GoFeatures); i { case 0: return &v.state @@ -159,19 +163,19 @@ func file_reflect_protodesc_proto_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc, + RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, NumEnums: 0, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, - GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes, - DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs, - MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes, - ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() - File_reflect_protodesc_proto_go_features_proto = out.File - file_reflect_protodesc_proto_go_features_proto_rawDesc = nil - file_reflect_protodesc_proto_go_features_proto_goTypes = nil - file_reflect_protodesc_proto_go_features_proto_depIdxs = nil + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_rawDesc = nil + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto deleted file mode 100644 index d24657129..000000000 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto +++ /dev/null @@ -1,28 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2023 Google Inc. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd - -syntax = "proto2"; - -package google.protobuf; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/protobuf/types/gofeaturespb"; - -extend google.protobuf.FeatureSet { - optional GoFeatures go = 1002; -} - -message GoFeatures { - // Whether or not to generate the deprecated UnmarshalJSON method for enums. 
- optional bool legacy_unmarshal_json_enum = 1 [ - retention = RETENTION_RUNTIME, - targets = TARGET_TYPE_ENUM, - edition_defaults = { edition: EDITION_PROTO2, value: "true" }, - edition_defaults = { edition: EDITION_PROTO3, value: "false" } - ]; -} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be54..7172b43d3 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8dd..1b71bcd91 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a336..83a5a645b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/github.com/evanphx/json-patch/.gitignore 
b/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore similarity index 100% rename from vendor/github.com/evanphx/json-patch/.gitignore rename to vendor/gopkg.in/evanphx/json-patch.v4/.gitignore diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE similarity index 100% rename from vendor/github.com/evanphx/json-patch/LICENSE rename to vendor/gopkg.in/evanphx/json-patch.v4/LICENSE diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/gopkg.in/evanphx/json-patch.v4/README.md similarity index 100% rename from vendor/github.com/evanphx/json-patch/README.md rename to vendor/gopkg.in/evanphx/json-patch.v4/README.md diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/gopkg.in/evanphx/json-patch.v4/errors.go similarity index 100% rename from vendor/github.com/evanphx/json-patch/errors.go rename to vendor/gopkg.in/evanphx/json-patch.v4/errors.go diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/gopkg.in/evanphx/json-patch.v4/merge.go similarity index 100% rename from vendor/github.com/evanphx/json-patch/merge.go rename to vendor/gopkg.in/evanphx/json-patch.v4/merge.go diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go similarity index 95% rename from vendor/github.com/evanphx/json-patch/patch.go rename to vendor/gopkg.in/evanphx/json-patch.v4/patch.go index 4bce5936d..dc2b7e51e 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/gopkg.in/evanphx/json-patch.v4/patch.go @@ -568,6 +568,29 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = &val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + con, key := findObject(doc, path) if con == nil { @@ -634,6 +657,25 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = *sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + con, key := findObject(doc, path) if con == nil { diff --git a/vendor/k8s.io/api/admissionregistration/v1/doc.go b/vendor/k8s.io/api/admissionregistration/v1/doc.go index c3940f090..ca0086188 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=admissionregistration.k8s.io // Package v1 is the v1 version of the API. diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go index 0f8019c54..09295734d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. 
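// Illustrative sketch, not part of the vendored diff: the renames above move the
// vendored json-patch package from github.com/evanphx/json-patch to
// gopkg.in/evanphx/json-patch.v4, and the patch.go hunk adds handling for "replace"
// and "test" operations whose "path" is the empty string, which RFC 6902 defines as
// addressing the whole document. Assuming the gopkg.in/evanphx/json-patch.v4 module
// is available, the snippet below shows what that root-path handling enables.
package main

import (
	"fmt"

	jsonpatch "gopkg.in/evanphx/json-patch.v4"
)

func main() {
	// "path": "" targets the document root, so this operation replaces the entire document.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op":"replace","path":"","value":{"replaced":true}}]`))
	if err != nil {
		panic(err)
	}
	out, err := patch.Apply([]byte(`{"original":true}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"replaced":true}
}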
*/ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto +// source: k8s.io/api/admissionregistration/v1/generated.proto package v1 @@ -25,6 +25,7 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -44,10 +45,122 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{2} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + +func (m *MatchResources) Reset() { *m = MatchResources{} } +func (*MatchResources) ProtoMessage() {} +func (*MatchResources) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{3} +} +func (m *MatchResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err 
:= m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchResources.Merge(m, src) +} +func (m *MatchResources) XXX_Size() int { + return m.Size() +} +func (m *MatchResources) XXX_DiscardUnknown() { + xxx_messageInfo_MatchResources.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchResources proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{0} + return fileDescriptor_3205c7dc5bf0c9bf, []int{4} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +188,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{1} + return fileDescriptor_3205c7dc5bf0c9bf, []int{5} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +216,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{2} + return fileDescriptor_3205c7dc5bf0c9bf, []int{6} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -128,10 +241,94 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo +func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } +func (*NamedRuleWithOperations) ProtoMessage() {} +func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{7} +} +func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRuleWithOperations.Merge(m, src) +} +func (m *NamedRuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *NamedRuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo + +func (m *ParamKind) Reset() { *m = ParamKind{} } +func (*ParamKind) ProtoMessage() {} +func (*ParamKind) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{8} +} +func (m *ParamKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamKind.Merge(m, src) +} +func (m *ParamKind) XXX_Size() int { + return m.Size() +} +func (m *ParamKind) 
XXX_DiscardUnknown() { + xxx_messageInfo_ParamKind.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamKind proto.InternalMessageInfo + +func (m *ParamRef) Reset() { *m = ParamRef{} } +func (*ParamRef) ProtoMessage() {} +func (*ParamRef) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{9} +} +func (m *ParamRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamRef.Merge(m, src) +} +func (m *ParamRef) XXX_Size() int { + return m.Size() +} +func (m *ParamRef) XXX_DiscardUnknown() { + xxx_messageInfo_ParamRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamRef proto.InternalMessageInfo + func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{3} + return fileDescriptor_3205c7dc5bf0c9bf, []int{10} } func (m *Rule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +356,7 @@ var xxx_messageInfo_Rule proto.InternalMessageInfo func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } func (*RuleWithOperations) ProtoMessage() {} func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{4} + return fileDescriptor_3205c7dc5bf0c9bf, []int{11} } func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +384,7 @@ var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{5} + return fileDescriptor_3205c7dc5bf0c9bf, []int{12} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -212,10 +409,234 @@ func (m *ServiceReference) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceReference proto.InternalMessageInfo +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{13} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } +func (*ValidatingAdmissionPolicy) ProtoMessage() {} +func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{14} +} +func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil +} +func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src) +} +func (m *ValidatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } +func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{15} +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } +func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{16} +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } +func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{17} +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } +func (*ValidatingAdmissionPolicyList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{18} +} +func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } +func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{19} +} +func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{20} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{6} + return fileDescriptor_3205c7dc5bf0c9bf, []int{21} } func (m *ValidatingWebhook) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +664,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{7} + return fileDescriptor_3205c7dc5bf0c9bf, []int{22} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +692,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{8} + return fileDescriptor_3205c7dc5bf0c9bf, []int{23} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -296,15 +717,15 @@ func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_aaac5994f79683e8, []int{9} +func (m *Validation) Reset() { *m = Validation{} } +func (*Validation) ProtoMessage() {} +func (*Validation) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{24} } -func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { +func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -312,106 +733,421 @@ func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookClientConfig.Merge(m, src) +func (m *Validation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validation.Merge(m, src) } -func (m *WebhookClientConfig) XXX_Size() int { +func (m *Validation) XXX_Size() int { return m.Size() } -func (m *WebhookClientConfig) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +func (m *Validation) XXX_DiscardUnknown() { + xxx_messageInfo_Validation.DiscardUnknown(m) } -var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo +var xxx_messageInfo_Validation proto.InternalMessageInfo -func init() { - proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") - proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") - proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") - proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule") +func (m *Variable) Reset() { *m = Variable{} } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{25} +} +func (m *Variable) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Variable) XXX_Merge(src proto.Message) { + xxx_messageInfo_Variable.Merge(m, src) +} +func (m *Variable) XXX_Size() int { + return m.Size() +} +func (m *Variable) XXX_DiscardUnknown() { + xxx_messageInfo_Variable.DiscardUnknown(m) +} + +var xxx_messageInfo_Variable proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_3205c7dc5bf0c9bf, []int{26} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1.MatchCondition") + proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1.MatchResources") + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook") + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList") + proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.NamedRuleWithOperations") + proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1.ParamKind") + proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1.ParamRef") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1.ServiceReference") + proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1.TypeChecking") + proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicy") + proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding") + proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingList") + proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec") + proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyList") + 
proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus") proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1.Validation") + proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1.Variable") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1.WebhookClientConfig") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1/generated.proto", fileDescriptor_aaac5994f79683e8) -} - -var fileDescriptor_aaac5994f79683e8 = []byte{ - // 1102 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0x76, 0x63, 0x8f, 0xf3, 0xa7, 0x19, 0xa0, 0x35, 0xa1, 0xf2, 0x5a, 0x46, 0x42, - 0x46, 0xc0, 0x6e, 0x13, 0x4a, 0xa9, 0xb8, 0xa0, 0x6c, 0xf8, 0xa3, 0x88, 0xa4, 0x8d, 0x26, 0x6d, - 0x8a, 0x50, 0x0e, 0x1d, 0xaf, 0xc7, 0xf6, 0x10, 0x7b, 0x67, 0x35, 0x33, 0x6b, 0xc8, 0x8d, 0x8f, - 0xc0, 0x57, 0x80, 0x4f, 0xc1, 0x0d, 0x71, 0xcb, 0xb1, 0xc7, 0x1c, 0xd0, 0x42, 0x96, 0x0b, 0x07, - 0x3e, 0x41, 0x4e, 0x68, 0x66, 0xd7, 0xbb, 0xfe, 0x93, 0xa4, 0x56, 0x0e, 0x3d, 0xe5, 0xb6, 0xf3, - 0x7b, 0xf3, 0x7e, 0x6f, 0xde, 0xdb, 0xf7, 0xde, 0x0f, 0xec, 0x1c, 0x3d, 0x12, 0x16, 0x65, 0xf6, - 0x51, 0xd0, 0x24, 0xdc, 0x23, 0x92, 0x08, 0x7b, 0x40, 0xbc, 0x16, 0xe3, 0x76, 0x62, 0xc0, 0x3e, - 0xb5, 0x71, 0xab, 0x4f, 0x85, 0xa0, 0xcc, 0xe3, 0xa4, 0x43, 0x85, 0xe4, 0x58, 0x52, 0xe6, 0xd9, - 0x83, 0x75, 0xbb, 0x43, 0x3c, 0xc2, 0xb1, 0x24, 0x2d, 0xcb, 0xe7, 0x4c, 0x32, 0xf8, 0x6e, 0xec, - 0x64, 0x61, 0x9f, 0x5a, 0x17, 0x3a, 0x59, 0x83, 0xf5, 0xb5, 0x8f, 0x3a, 0x54, 0x76, 0x83, 0xa6, - 0xe5, 0xb2, 0xbe, 0xdd, 0x61, 0x1d, 0x66, 0x6b, 0xdf, 0x66, 0xd0, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, - 0x31, 0xe7, 0xda, 0x83, 0xec, 0x21, 0x7d, 0xec, 0x76, 0xa9, 0x47, 0xf8, 0xb1, 0xed, 0x1f, 0x75, - 0x14, 0x20, 0xec, 0x3e, 0x91, 0xf8, 0x82, 0x97, 0xac, 0xd9, 0x97, 0x79, 0xf1, 0xc0, 0x93, 0xb4, - 0x4f, 0xa6, 0x1c, 0x1e, 0xbe, 0xca, 0x41, 0xb8, 0x5d, 0xd2, 0xc7, 0x93, 0x7e, 0xf5, 0xdf, 0x17, - 0xc0, 0xca, 0x6e, 0x20, 0xb1, 0xa4, 0x5e, 0xe7, 0x39, 0x69, 0x76, 0x19, 0x3b, 0x82, 0x35, 0x90, - 0xf7, 0x70, 0x9f, 0x54, 0x8c, 0x9a, 0xd1, 0x28, 0x39, 0x8b, 0x27, 0xa1, 0x39, 0x17, 0x85, 0x66, - 0xfe, 0x31, 0xee, 0x13, 0xa4, 0x2d, 0x90, 0x83, 0x45, 0xb7, 0x47, 0x89, 0x27, 0xb7, 0x98, 0xd7, - 0xa6, 0x9d, 0xca, 0x7c, 0xcd, 0x68, 0x94, 0x37, 0x1e, 0x59, 0x33, 0xd4, 0xcf, 0x4a, 0xa2, 0x6c, - 0x8d, 0xf8, 0x3b, 0x6f, 0x26, 0x31, 0x16, 0x47, 0x51, 0x34, 0x16, 0x03, 0x1e, 0x82, 0x02, 0x0f, - 0x7a, 0x44, 0x54, 0x72, 0xb5, 0x5c, 0xa3, 0xbc, 0xf1, 0xe9, 0x4c, 0xc1, 0x50, 0xd0, 0x23, 0xcf, - 0xa9, 0xec, 0x3e, 0xf1, 0x49, 0x0c, 0x0a, 0x67, 0x29, 0x89, 0x55, 0x50, 0x36, 0x81, 0x62, 0x52, - 0xb8, 0x03, 0x96, 0xda, 0x98, 0xf6, 0x02, 0x4e, 0xf6, 0x58, 0x8f, 0xba, 0xc7, 0x95, 0xbc, 0x4e, - 0xfe, 0xbd, 0x28, 0x34, 0x97, 0xbe, 0x1a, 0x35, 0x9c, 0x87, 0xe6, 0xea, 0x18, 0xf0, 0xf4, 0xd8, - 0x27, 0x68, 0xdc, 0x19, 0x7e, 0x01, 
0xca, 0x7d, 0x2c, 0xdd, 0x6e, 0xc2, 0x55, 0xd2, 0x5c, 0xf5, - 0x28, 0x34, 0xcb, 0xbb, 0x19, 0x7c, 0x1e, 0x9a, 0x2b, 0x23, 0x47, 0xcd, 0x33, 0xea, 0x06, 0x7f, - 0x04, 0xab, 0xaa, 0xda, 0xc2, 0xc7, 0x2e, 0xd9, 0x27, 0x3d, 0xe2, 0x4a, 0xc6, 0x2b, 0x05, 0x5d, - 0xea, 0x8f, 0x47, 0xb2, 0x4f, 0xff, 0xb7, 0xe5, 0x1f, 0x75, 0x14, 0x20, 0x2c, 0xd5, 0x56, 0x2a, - 0xfd, 0x1d, 0xdc, 0x24, 0xbd, 0xa1, 0xab, 0xf3, 0x56, 0x14, 0x9a, 0xab, 0x8f, 0x27, 0x19, 0xd1, - 0x74, 0x10, 0xc8, 0xc0, 0x32, 0x6b, 0x7e, 0x4f, 0x5c, 0x99, 0x86, 0x2d, 0x5f, 0x3f, 0x2c, 0x8c, - 0x42, 0x73, 0xf9, 0xc9, 0x18, 0x1d, 0x9a, 0xa0, 0x57, 0x05, 0x13, 0xb4, 0x45, 0xbe, 0x6c, 0xb7, - 0x89, 0x2b, 0x45, 0xe5, 0x56, 0x56, 0xb0, 0xfd, 0x0c, 0x56, 0x05, 0xcb, 0x8e, 0x5b, 0x3d, 0x2c, - 0x04, 0x1a, 0x75, 0x83, 0x9f, 0x81, 0x65, 0xd5, 0xeb, 0x2c, 0x90, 0xfb, 0xc4, 0x65, 0x5e, 0x4b, - 0x54, 0x16, 0x6a, 0x46, 0xa3, 0x10, 0xbf, 0xe0, 0xe9, 0x98, 0x05, 0x4d, 0xdc, 0x84, 0xcf, 0xc0, - 0xdd, 0xb4, 0x8b, 0x10, 0x19, 0x50, 0xf2, 0xc3, 0x01, 0xe1, 0xea, 0x20, 0x2a, 0xc5, 0x5a, 0xae, - 0x51, 0x72, 0xde, 0x89, 0x42, 0xf3, 0xee, 0xe6, 0xc5, 0x57, 0xd0, 0x65, 0xbe, 0xf0, 0x05, 0x80, - 0x9c, 0x50, 0x6f, 0xc0, 0x5c, 0xdd, 0x7e, 0x49, 0x43, 0x00, 0x9d, 0xdf, 0xfd, 0x28, 0x34, 0x21, - 0x9a, 0xb2, 0x9e, 0x87, 0xe6, 0x9d, 0x69, 0x54, 0xb7, 0xc7, 0x05, 0x5c, 0xf5, 0x53, 0x03, 0xdc, - 0x9b, 0x98, 0xe0, 0x78, 0x62, 0x82, 0xb8, 0xe3, 0xe1, 0x0b, 0x50, 0x54, 0x3f, 0xa6, 0x85, 0x25, - 0xd6, 0x23, 0x5d, 0xde, 0xb8, 0x3f, 0xdb, 0x6f, 0x8c, 0xff, 0xd9, 0x2e, 0x91, 0xd8, 0x81, 0xc9, - 0xd0, 0x80, 0x0c, 0x43, 0x29, 0x2b, 0x3c, 0x00, 0xc5, 0x24, 0xb2, 0xa8, 0xcc, 0xeb, 0xe9, 0x7c, - 0x30, 0xd3, 0x74, 0x4e, 0x3c, 0xdb, 0xc9, 0xab, 0x28, 0x28, 0xe5, 0xaa, 0xff, 0x6b, 0x80, 0xda, - 0x55, 0xa9, 0xed, 0x50, 0x21, 0xe1, 0xe1, 0x54, 0x7a, 0xd6, 0x8c, 0x5d, 0x4a, 0x45, 0x9c, 0xdc, - 0xed, 0x24, 0xb9, 0xe2, 0x10, 0x19, 0x49, 0xad, 0x0d, 0x0a, 0x54, 0x92, 0xfe, 0x30, 0xaf, 0xcd, - 0xeb, 0xe4, 0x35, 0xf6, 0xe6, 0x6c, 0xff, 0x6c, 0x2b, 0x5e, 0x14, 0xd3, 0xd7, 0xff, 0x30, 0x40, - 0x5e, 0x2d, 0x24, 0xf8, 0x01, 0x28, 0x61, 0x9f, 0x7e, 0xcd, 0x59, 0xe0, 0x8b, 0x8a, 0xa1, 0x3b, - 0x6f, 0x29, 0x0a, 0xcd, 0xd2, 0xe6, 0xde, 0x76, 0x0c, 0xa2, 0xcc, 0x0e, 0xd7, 0x41, 0x19, 0xfb, - 0x34, 0x6d, 0xd4, 0x79, 0x7d, 0x7d, 0x45, 0x8d, 0xcd, 0xe6, 0xde, 0x76, 0xda, 0x9c, 0xa3, 0x77, - 0x14, 0x3f, 0x27, 0x82, 0x05, 0xdc, 0x4d, 0x56, 0x69, 0xc2, 0x8f, 0x86, 0x20, 0xca, 0xec, 0xf0, - 0x43, 0x50, 0x10, 0x2e, 0xf3, 0x49, 0xb2, 0x0d, 0xef, 0xa8, 0x67, 0xef, 0x2b, 0xe0, 0x3c, 0x34, - 0x4b, 0xfa, 0x43, 0xb7, 0x65, 0x7c, 0xa9, 0xfe, 0xab, 0x01, 0xe0, 0xf4, 0xc2, 0x85, 0x9f, 0x03, - 0xc0, 0xd2, 0x53, 0x92, 0x92, 0xa9, 0x7b, 0x29, 0x45, 0xcf, 0x43, 0x73, 0x29, 0x3d, 0x69, 0xca, - 0x11, 0x17, 0xf8, 0x0d, 0xc8, 0xab, 0x25, 0x9d, 0xa8, 0xcc, 0xfb, 0x33, 0x2f, 0xfe, 0x4c, 0xba, - 0xd4, 0x09, 0x69, 0x92, 0xfa, 0x2f, 0x06, 0xb8, 0xbd, 0x4f, 0xf8, 0x80, 0xba, 0x04, 0x91, 0x36, - 0xe1, 0xc4, 0x73, 0x09, 0xb4, 0x41, 0x29, 0x5d, 0x82, 0x89, 0xec, 0xad, 0x26, 0xbe, 0xa5, 0x74, - 0x61, 0xa2, 0xec, 0x4e, 0x2a, 0x91, 0xf3, 0x97, 0x4a, 0xe4, 0x3d, 0x90, 0xf7, 0xb1, 0xec, 0x56, - 0x72, 0xfa, 0x46, 0x51, 0x59, 0xf7, 0xb0, 0xec, 0x22, 0x8d, 0x6a, 0x2b, 0xe3, 0x52, 0xd7, 0xb5, - 0x90, 0x58, 0x19, 0x97, 0x48, 0xa3, 0xf5, 0xbf, 0x6e, 0x81, 0xd5, 0x03, 0xdc, 0xa3, 0xad, 0x1b, - 0x59, 0xbe, 0x91, 0xe5, 0x2b, 0x65, 0x19, 0xdc, 0xc8, 0xf2, 0x75, 0x64, 0xb9, 0xfe, 0xa7, 0x01, - 0xaa, 0x53, 0x13, 0xf6, 0xba, 0x65, 0xf3, 0xdb, 0x29, 0xd9, 0x7c, 0x38, 0xd3, 0xf4, 0x4c, 0x3d, - 0x7c, 0x4a, 0x38, 0xff, 0x33, 0x40, 0xfd, 0xea, 0xf4, 0x5e, 
0x83, 0x74, 0x76, 0xc7, 0xa5, 0x73, - 0xeb, 0x7a, 0xb9, 0xcd, 0x22, 0x9e, 0xbf, 0x19, 0xe0, 0x8d, 0x0b, 0xf6, 0x17, 0x7c, 0x1b, 0xe4, - 0x02, 0xde, 0x4b, 0x56, 0xf0, 0x42, 0x14, 0x9a, 0xb9, 0x67, 0x68, 0x07, 0x29, 0x0c, 0x1e, 0x82, - 0x05, 0x11, 0xab, 0x40, 0x92, 0xf9, 0x27, 0x33, 0x3d, 0x6f, 0x52, 0x39, 0x9c, 0x72, 0x14, 0x9a, - 0x0b, 0x43, 0x74, 0x48, 0x09, 0x1b, 0xa0, 0xe8, 0x62, 0x27, 0xf0, 0x5a, 0x89, 0x6a, 0x2d, 0x3a, - 0x8b, 0xaa, 0x48, 0x5b, 0x9b, 0x31, 0x86, 0x52, 0xab, 0xd3, 0x38, 0x39, 0xab, 0xce, 0xbd, 0x3c, - 0xab, 0xce, 0x9d, 0x9e, 0x55, 0xe7, 0x7e, 0x8a, 0xaa, 0xc6, 0x49, 0x54, 0x35, 0x5e, 0x46, 0x55, - 0xe3, 0x34, 0xaa, 0x1a, 0x7f, 0x47, 0x55, 0xe3, 0xe7, 0x7f, 0xaa, 0x73, 0xdf, 0xcd, 0x0f, 0xd6, - 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x84, 0xf2, 0xb0, 0x00, 0xeb, 0x0e, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/admissionregistration/v1/generated.proto", fileDescriptor_3205c7dc5bf0c9bf) +} + +var fileDescriptor_3205c7dc5bf0c9bf = []byte{ + // 2075 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xf7, 0x8a, 0x94, 0x44, 0x3e, 0xea, 0x8b, 0x13, 0x27, 0xa2, 0x1d, 0x87, 0x2b, 0x6c, 0x82, + 0xc2, 0x46, 0x63, 0x32, 0xb2, 0x53, 0x27, 0x08, 0x8a, 0x06, 0xa2, 0xfc, 0x01, 0xc5, 0x96, 0x2d, + 0x8c, 0x12, 0xa9, 0x68, 0xdd, 0x22, 0xab, 0xdd, 0x21, 0xb9, 0x11, 0xb9, 0xbb, 0xd8, 0xd9, 0x65, + 0xac, 0x9e, 0x8a, 0xf6, 0x5e, 0x14, 0xe8, 0x5f, 0xd0, 0xfe, 0x09, 0xbd, 0xb4, 0x40, 0x4f, 0xbd, + 0xf9, 0x52, 0x20, 0x3d, 0xd5, 0x87, 0x62, 0x51, 0xb3, 0x97, 0x1e, 0x7a, 0x68, 0xaf, 0x02, 0x8a, + 0x16, 0x33, 0x3b, 0xfb, 0xc9, 0xa5, 0xb5, 0x96, 0x6d, 0xf5, 0xe2, 0x9b, 0xf6, 0x7d, 0xfc, 0xde, + 0xbc, 0x37, 0x6f, 0xe6, 0xbd, 0x79, 0x14, 0x5c, 0x3f, 0xfc, 0x98, 0xb6, 0x0c, 0xab, 0xad, 0xda, + 0x46, 0x5b, 0xd5, 0x87, 0x06, 0xa5, 0x86, 0x65, 0x3a, 0xa4, 0x67, 0x50, 0xd7, 0x51, 0x5d, 0xc3, + 0x32, 0xdb, 0xa3, 0xf5, 0x76, 0x8f, 0x98, 0xc4, 0x51, 0x5d, 0xa2, 0xb7, 0x6c, 0xc7, 0x72, 0x2d, + 0xf4, 0x6e, 0xa0, 0xd4, 0x52, 0x6d, 0xa3, 0x95, 0xab, 0xd4, 0x1a, 0xad, 0x5f, 0xbc, 0xda, 0x33, + 0xdc, 0xbe, 0x77, 0xd0, 0xd2, 0xac, 0x61, 0xbb, 0x67, 0xf5, 0xac, 0x36, 0xd7, 0x3d, 0xf0, 0xba, + 0xfc, 0x8b, 0x7f, 0xf0, 0xbf, 0x02, 0xcc, 0x8b, 0x1f, 0xc6, 0x0b, 0x19, 0xaa, 0x5a, 0xdf, 0x30, + 0x89, 0x73, 0xd4, 0xb6, 0x0f, 0x7b, 0x8c, 0x40, 0xdb, 0x43, 0xe2, 0xaa, 0x39, 0x2b, 0xb9, 0xd8, + 0x9e, 0xa6, 0xe5, 0x78, 0xa6, 0x6b, 0x0c, 0xc9, 0x84, 0xc2, 0x8d, 0x93, 0x14, 0xa8, 0xd6, 0x27, + 0x43, 0x35, 0xab, 0xa7, 0x50, 0x58, 0xde, 0xf0, 0x74, 0xc3, 0xdd, 0x30, 0x4d, 0xcb, 0xe5, 0x3e, + 0xa2, 0x77, 0xa0, 0x74, 0x48, 0x8e, 0x1a, 0xd2, 0x9a, 0x74, 0xb9, 0xda, 0xa9, 0x3d, 0xf6, 0xe5, + 0x73, 0x63, 0x5f, 0x2e, 0xdd, 0x25, 0x47, 0x98, 0xd1, 0xd1, 0x06, 0x2c, 0x8f, 0xd4, 0x81, 0x47, + 0x6e, 0x3d, 0xb2, 0x1d, 0xc2, 0x23, 0xd4, 0x98, 0xe1, 0xa2, 0xab, 0x42, 0x74, 0x79, 0x2f, 0xcd, + 0xc6, 0x59, 0x79, 0x65, 0x00, 0xf5, 0xf8, 0x6b, 0x5f, 0x75, 0x4c, 0xc3, 0xec, 0xa1, 0xf7, 0xa1, + 0xd2, 0x35, 0xc8, 0x40, 0xc7, 0xa4, 0x2b, 0x00, 0x57, 0x04, 0x60, 0xe5, 0xb6, 0xa0, 0xe3, 0x48, + 0x02, 0x5d, 0x81, 0xf9, 0xaf, 0x03, 0xc5, 0x46, 0x89, 0x0b, 0x2f, 0x0b, 0xe1, 0x79, 0x81, 0x87, + 0x43, 0xbe, 0xd2, 0x85, 0xa5, 0x6d, 0xd5, 0xd5, 0xfa, 0x9b, 0x96, 0xa9, 0x1b, 0xdc, 0xc3, 0x35, + 0x28, 0x9b, 0xea, 0x90, 0x08, 0x17, 0x17, 0x84, 0x66, 0xf9, 0xbe, 0x3a, 0x24, 0x98, 0x73, 0xd0, + 0x35, 0x00, 0x92, 0xf5, 0x0f, 0x09, 0x39, 0x48, 0xb8, 0x96, 0x90, 0x52, 0xfe, 0x54, 0x16, 0x86, + 0x30, 0xa1, 0x96, 0xe7, 0x68, 0x84, 0xa2, 0x47, 0x50, 0x67, 0x70, 0xd4, 0x56, 0x35, 
0xb2, 0x4b, + 0x06, 0x44, 0x73, 0x2d, 0x87, 0x5b, 0xad, 0x5d, 0xbb, 0xde, 0x8a, 0x93, 0x2d, 0xda, 0xb1, 0x96, + 0x7d, 0xd8, 0x63, 0x04, 0xda, 0x62, 0x89, 0xd1, 0x1a, 0xad, 0xb7, 0xee, 0xa9, 0x07, 0x64, 0x10, + 0xaa, 0x76, 0xde, 0x1c, 0xfb, 0x72, 0xfd, 0x7e, 0x16, 0x11, 0x4f, 0x1a, 0x41, 0x16, 0x2c, 0x59, + 0x07, 0x5f, 0x11, 0xcd, 0x8d, 0xcc, 0xce, 0x9c, 0xde, 0x2c, 0x1a, 0xfb, 0xf2, 0xd2, 0x83, 0x14, + 0x1c, 0xce, 0xc0, 0xa3, 0x23, 0x58, 0x74, 0x84, 0xdf, 0xd8, 0x1b, 0x10, 0xda, 0x28, 0xad, 0x95, + 0x2e, 0xd7, 0xae, 0x7d, 0xb7, 0x55, 0xe0, 0x4c, 0xb5, 0x98, 0x4b, 0x3a, 0x53, 0xdb, 0x37, 0xdc, + 0xfe, 0x03, 0x9b, 0x04, 0x1c, 0xda, 0x79, 0x53, 0x84, 0x7c, 0x11, 0x27, 0xa1, 0x71, 0xda, 0x12, + 0xfa, 0x85, 0x04, 0xe7, 0xc9, 0x23, 0x6d, 0xe0, 0xe9, 0x24, 0x25, 0xd7, 0x28, 0xbf, 0x84, 0x25, + 0x5c, 0x12, 0x4b, 0x38, 0x7f, 0x2b, 0xc7, 0x02, 0xce, 0xb5, 0x8b, 0x6e, 0x42, 0x6d, 0xc8, 0x12, + 0x61, 0xc7, 0x1a, 0x18, 0xda, 0x51, 0x63, 0x9e, 0xa7, 0x8f, 0x32, 0xf6, 0xe5, 0xda, 0x76, 0x4c, + 0x3e, 0xf6, 0xe5, 0xe5, 0xc4, 0xe7, 0xe7, 0x47, 0x36, 0xc1, 0x49, 0x35, 0xe5, 0x77, 0x15, 0x58, + 0xde, 0xf6, 0xd8, 0xa1, 0x34, 0x7b, 0xfb, 0xe4, 0xa0, 0x6f, 0x59, 0x87, 0x05, 0x32, 0xd7, 0x81, + 0x05, 0x6d, 0x60, 0x10, 0xd3, 0xdd, 0xb4, 0xcc, 0xae, 0xd1, 0x13, 0xdb, 0xfe, 0x71, 0xa1, 0x18, + 0x08, 0x2b, 0x9b, 0x09, 0xfd, 0xce, 0x79, 0x61, 0x63, 0x21, 0x49, 0xc5, 0x29, 0x1b, 0xe8, 0x21, + 0xcc, 0x3a, 0x89, 0x3d, 0xff, 0xa8, 0x90, 0xb1, 0x9c, 0x58, 0x2f, 0x0a, 0x5b, 0xb3, 0x41, 0x70, + 0x03, 0x50, 0x74, 0x0f, 0x16, 0xbb, 0xaa, 0x31, 0xf0, 0x1c, 0x22, 0xe2, 0x59, 0xe6, 0xce, 0x7f, + 0x8b, 0xe5, 0xc5, 0xed, 0x24, 0xe3, 0xd8, 0x97, 0xeb, 0x29, 0x02, 0x8f, 0x69, 0x5a, 0x39, 0xbb, + 0x37, 0xd5, 0x53, 0xed, 0x4d, 0xfe, 0xc1, 0x9e, 0xfd, 0xff, 0x1c, 0xec, 0xda, 0xab, 0x3d, 0xd8, + 0x37, 0xa1, 0x46, 0x0d, 0x9d, 0xdc, 0xea, 0x76, 0x89, 0xe6, 0xd2, 0xc6, 0x5c, 0x1c, 0xb0, 0xdd, + 0x98, 0xcc, 0x02, 0x16, 0x7f, 0x6e, 0x0e, 0x54, 0x4a, 0x71, 0x52, 0x0d, 0x7d, 0x02, 0x4b, 0xac, + 0x0c, 0x59, 0x9e, 0xbb, 0x4b, 0x34, 0xcb, 0xd4, 0x29, 0x3f, 0x15, 0xb3, 0xc1, 0x0a, 0x3e, 0x4f, + 0x71, 0x70, 0x46, 0x12, 0x7d, 0x01, 0xab, 0x51, 0x16, 0x61, 0x32, 0x32, 0xc8, 0xd7, 0x7b, 0xc4, + 0x61, 0x1f, 0xb4, 0x51, 0x59, 0x2b, 0x5d, 0xae, 0x76, 0xde, 0x1e, 0xfb, 0xf2, 0xea, 0x46, 0xbe, + 0x08, 0x9e, 0xa6, 0x8b, 0xbe, 0x04, 0xe4, 0x10, 0xc3, 0x1c, 0x59, 0x1a, 0x4f, 0x3f, 0x91, 0x10, + 0xc0, 0xfd, 0xfb, 0x60, 0xec, 0xcb, 0x08, 0x4f, 0x70, 0x8f, 0x7d, 0xf9, 0xad, 0x49, 0x2a, 0x4f, + 0x8f, 0x1c, 0x2c, 0x34, 0x82, 0xe5, 0x61, 0xaa, 0xf2, 0xd0, 0xc6, 0x02, 0x3f, 0x21, 0xd7, 0x0b, + 0x9d, 0x90, 0x74, 0xd5, 0x8a, 0xeb, 0x6b, 0x9a, 0x4e, 0x71, 0xd6, 0x88, 0xf2, 0x44, 0x82, 0x4b, + 0x99, 0x9b, 0x23, 0x38, 0xa9, 0x5e, 0x00, 0x8e, 0xbe, 0x84, 0x0a, 0x4b, 0x08, 0x5d, 0x75, 0x55, + 0x51, 0x8e, 0x3e, 0x28, 0x96, 0x3e, 0x41, 0xae, 0x6c, 0x13, 0x57, 0x8d, 0xcb, 0x61, 0x4c, 0xc3, + 0x11, 0x2a, 0xda, 0x83, 0x8a, 0xb0, 0x4c, 0x1b, 0x33, 0xdc, 0xe7, 0x0f, 0x8b, 0xf9, 0x9c, 0x5e, + 0x76, 0xa7, 0xcc, 0xac, 0xe0, 0x08, 0x4b, 0xf9, 0x87, 0x04, 0x6b, 0xcf, 0x72, 0xed, 0x9e, 0x41, + 0x5d, 0xf4, 0x70, 0xc2, 0xbd, 0x56, 0xc1, 0xd3, 0x61, 0xd0, 0xc0, 0xb9, 0xa8, 0xf5, 0x08, 0x29, + 0x09, 0xd7, 0xba, 0x30, 0x6b, 0xb8, 0x64, 0x18, 0xfa, 0xb5, 0x71, 0x1a, 0xbf, 0x52, 0x6b, 0x8e, + 0xef, 0xbd, 0x2d, 0x86, 0x8b, 0x03, 0x78, 0xb6, 0x8b, 0xab, 0x53, 0xaa, 0x12, 0xfa, 0x28, 0xae, + 0xb6, 0xfc, 0xd6, 0x68, 0x48, 0xfc, 0x20, 0xd4, 0x93, 0xb5, 0x92, 0x33, 0x70, 0x5a, 0x0e, 0xfd, + 0x5c, 0x02, 0xe4, 0x4c, 0xe0, 0x89, 0x2a, 0x71, 0xea, 0x8b, 0xfb, 0xa2, 0x70, 0x00, 0x4d, 0xf2, + 0x70, 0x8e, 
0x39, 0x45, 0x85, 0xea, 0x8e, 0xea, 0xa8, 0xc3, 0xbb, 0x86, 0xa9, 0xb3, 0x5e, 0x4b, + 0xb5, 0x0d, 0x71, 0x2c, 0x45, 0x65, 0x8b, 0x92, 0x6b, 0x63, 0x67, 0x4b, 0x70, 0x70, 0x42, 0x8a, + 0xd5, 0xc1, 0x43, 0xc3, 0xd4, 0x45, 0x67, 0x16, 0xd5, 0x41, 0x86, 0x87, 0x39, 0x47, 0xf9, 0xed, + 0x0c, 0x54, 0xb8, 0x0d, 0xd6, 0x2d, 0x9e, 0x5c, 0x36, 0xdb, 0x50, 0x8d, 0xee, 0x5a, 0x81, 0x5a, + 0x17, 0x62, 0xd5, 0xe8, 0x5e, 0xc6, 0xb1, 0x0c, 0xfa, 0x11, 0x54, 0x68, 0x78, 0x03, 0x97, 0x4e, + 0x7f, 0x03, 0x2f, 0xb0, 0x24, 0x8b, 0xee, 0xde, 0x08, 0x12, 0xb9, 0xb0, 0x6a, 0xb3, 0xd5, 0x13, + 0x97, 0x38, 0xf7, 0x2d, 0xf7, 0xb6, 0xe5, 0x99, 0xfa, 0x86, 0xc6, 0xa2, 0x27, 0xca, 0xdf, 0x27, + 0xec, 0xce, 0xdb, 0xc9, 0x17, 0x39, 0xf6, 0xe5, 0xb7, 0xa7, 0xb0, 0xf8, 0x5d, 0x35, 0x0d, 0x5a, + 0xf9, 0xa3, 0x04, 0x65, 0xb6, 0x85, 0xe8, 0xdb, 0x50, 0x55, 0x6d, 0xe3, 0x8e, 0x63, 0x79, 0x76, + 0x98, 0x5b, 0x8b, 0x2c, 0x14, 0x1b, 0x3b, 0x5b, 0x01, 0x11, 0xc7, 0x7c, 0xb4, 0x0e, 0xb5, 0x78, + 0x6b, 0x82, 0x63, 0x51, 0xed, 0x2c, 0xb3, 0x0a, 0x11, 0xef, 0x1e, 0xc5, 0x49, 0x19, 0x86, 0x1f, + 0xe6, 0x65, 0xd0, 0x35, 0x08, 0xfc, 0xa8, 0x75, 0xc6, 0x31, 0x1f, 0xbd, 0x0f, 0xb3, 0x54, 0xb3, + 0x6c, 0x22, 0x3c, 0x7f, 0x8b, 0x9d, 0x94, 0x5d, 0x46, 0x38, 0xf6, 0xe5, 0x2a, 0xff, 0x83, 0x7b, + 0x15, 0x08, 0x29, 0xbf, 0x91, 0x20, 0x27, 0x0d, 0xd1, 0xa7, 0x00, 0x56, 0x9c, 0xef, 0x81, 0x4b, + 0x32, 0xbf, 0xbe, 0x22, 0xea, 0xb1, 0x2f, 0x2f, 0x46, 0x5f, 0x1c, 0x32, 0xa1, 0x82, 0xee, 0x42, + 0x99, 0x65, 0xb2, 0x38, 0x2a, 0x57, 0x0a, 0x1f, 0x95, 0x38, 0xdd, 0xd8, 0x17, 0xe6, 0x20, 0xca, + 0xaf, 0x25, 0x58, 0xd9, 0x25, 0xce, 0xc8, 0xd0, 0x08, 0x26, 0x5d, 0xe2, 0x10, 0x53, 0xcb, 0xe4, + 0xa0, 0x54, 0x20, 0x07, 0xc3, 0xb4, 0x9e, 0x99, 0x9a, 0xd6, 0x97, 0xa0, 0x6c, 0xab, 0x6e, 0x5f, + 0xbc, 0x91, 0x2a, 0x8c, 0xbb, 0xa3, 0xba, 0x7d, 0xcc, 0xa9, 0x9c, 0x6b, 0x39, 0x2e, 0x8f, 0xeb, + 0xac, 0xe0, 0x5a, 0x8e, 0x8b, 0x39, 0x55, 0xf9, 0x95, 0x04, 0x0b, 0x2c, 0x0a, 0x9b, 0x7d, 0xa2, + 0x1d, 0xb2, 0x17, 0xda, 0xcf, 0x24, 0x40, 0x24, 0xfb, 0x6e, 0x0b, 0x62, 0x59, 0xbb, 0x76, 0xa3, + 0x50, 0x40, 0x26, 0x9e, 0x7d, 0xf1, 0xd5, 0x31, 0xc1, 0xa2, 0x38, 0xc7, 0x9a, 0xf2, 0xe7, 0x19, + 0xb8, 0xb0, 0xa7, 0x0e, 0x0c, 0x9d, 0x5f, 0xa7, 0x51, 0xd1, 0x17, 0x15, 0xf7, 0xd5, 0x17, 0x36, + 0x1d, 0xca, 0xd4, 0x26, 0x9a, 0x48, 0x83, 0x4e, 0x21, 0xaf, 0xa7, 0xae, 0x77, 0xd7, 0x26, 0x5a, + 0xbc, 0x6f, 0xec, 0x0b, 0x73, 0x74, 0x34, 0x80, 0x39, 0xea, 0xaa, 0xae, 0x47, 0xc5, 0xdd, 0x72, + 0xf3, 0x05, 0xed, 0x70, 0xac, 0xce, 0x92, 0xb0, 0x34, 0x17, 0x7c, 0x63, 0x61, 0x43, 0xf9, 0xb7, + 0x04, 0x6b, 0x53, 0x75, 0x3b, 0x86, 0xa9, 0xb3, 0xdd, 0x7f, 0xf5, 0xa1, 0x3d, 0x4c, 0x85, 0x76, + 0xeb, 0xc5, 0x5c, 0x16, 0xcb, 0x9e, 0x16, 0x61, 0xe5, 0x5f, 0x12, 0xbc, 0x77, 0x92, 0xf2, 0x19, + 0x34, 0x13, 0x5f, 0xa5, 0x9b, 0x89, 0x5b, 0x2f, 0xc5, 0xe9, 0x29, 0x0d, 0xc5, 0x7f, 0x66, 0x4e, + 0x76, 0x99, 0x45, 0x88, 0x55, 0x64, 0x9b, 0x13, 0xef, 0xc7, 0x45, 0x33, 0xda, 0xba, 0x9d, 0x88, + 0x83, 0x13, 0x52, 0x68, 0x1f, 0x2a, 0xb6, 0x28, 0xb7, 0x62, 0x03, 0xaf, 0x16, 0xf2, 0x25, 0xac, + 0xd1, 0x41, 0x25, 0x0c, 0xbf, 0x70, 0x04, 0xc6, 0x1e, 0x3c, 0xc3, 0xd4, 0x54, 0x25, 0xa7, 0xdc, + 0x9e, 0xd0, 0x43, 0x47, 0xaa, 0xc1, 0x73, 0x23, 0x4d, 0xc3, 0x19, 0x78, 0xb4, 0x0f, 0xf5, 0x91, + 0x88, 0x92, 0x65, 0x06, 0x85, 0x31, 0x18, 0x25, 0x54, 0x3b, 0x57, 0xd8, 0x33, 0x6d, 0x2f, 0xcb, + 0x3c, 0xf6, 0xe5, 0x95, 0x2c, 0x11, 0x4f, 0x62, 0x28, 0x63, 0x09, 0xde, 0x99, 0x1a, 0xff, 0x33, + 0xc8, 0x35, 0x2d, 0x9d, 0x6b, 0xdf, 0x7b, 0xc1, 0x5c, 0x9b, 0x92, 0x64, 0xb3, 0xcf, 0x70, 0x92, + 0x67, 0xd7, 0x0f, 0xa1, 0x6a, 0x87, 
0xcd, 0x5f, 0x8e, 0x97, 0x27, 0xa4, 0x0a, 0xd3, 0x0a, 0x7a, + 0x85, 0xe8, 0x13, 0xc7, 0x78, 0xc8, 0x83, 0x95, 0xf0, 0x35, 0xc4, 0x54, 0x0d, 0xd3, 0xa5, 0x39, + 0x93, 0xaf, 0xc2, 0xf9, 0x72, 0x7e, 0xec, 0xcb, 0x2b, 0xdb, 0x19, 0x40, 0x3c, 0x61, 0x02, 0x75, + 0xa1, 0x16, 0xef, 0x77, 0x38, 0x07, 0x69, 0x3f, 0x57, 0x80, 0x2d, 0xb3, 0xf3, 0x86, 0x88, 0x68, + 0x2d, 0xa6, 0x51, 0x9c, 0x04, 0x7e, 0xc9, 0xb3, 0x90, 0x9f, 0xc0, 0x8a, 0x9a, 0x1e, 0xfe, 0xd2, + 0xc6, 0xec, 0x73, 0x3c, 0xd6, 0x32, 0x93, 0xe3, 0x4e, 0x43, 0xac, 0x7f, 0x25, 0xc3, 0xa0, 0x78, + 0xc2, 0x4e, 0xde, 0xdb, 0x78, 0xee, 0x0c, 0xde, 0xc6, 0xe8, 0xc7, 0x50, 0x1d, 0xa9, 0x8e, 0xa1, + 0x1e, 0x0c, 0x08, 0x6d, 0xcc, 0x73, 0x8b, 0x57, 0x0b, 0xee, 0x53, 0xa0, 0x15, 0xf7, 0x64, 0x21, + 0x85, 0xe2, 0x18, 0x52, 0xf9, 0xc3, 0x0c, 0xc8, 0x27, 0xd4, 0x61, 0xf4, 0x19, 0x20, 0xeb, 0x80, + 0x12, 0x67, 0x44, 0xf4, 0x3b, 0xc1, 0x3c, 0x3e, 0x7c, 0xf9, 0x94, 0xe2, 0x7e, 0xe8, 0xc1, 0x84, + 0x04, 0xce, 0xd1, 0x42, 0x3d, 0x58, 0x70, 0x13, 0x4d, 0x9a, 0x48, 0xf6, 0xf5, 0x42, 0x2e, 0x25, + 0xbb, 0xbb, 0xce, 0xca, 0xd8, 0x97, 0x53, 0xfd, 0x1e, 0x4e, 0x01, 0x23, 0x0d, 0x40, 0x8b, 0xf7, + 0x6a, 0x32, 0xc3, 0x9f, 0x71, 0x3b, 0xc5, 0xfb, 0x14, 0x55, 0x91, 0xc4, 0x16, 0x25, 0x60, 0x95, + 0xbf, 0xcc, 0x43, 0x3d, 0x8e, 0xde, 0xeb, 0xa9, 0xe7, 0xeb, 0xa9, 0xe7, 0xb4, 0xa9, 0x27, 0xbc, + 0x9e, 0x7a, 0x9e, 0x6a, 0xea, 0x99, 0x73, 0xef, 0xd6, 0xce, 0x62, 0x26, 0xf9, 0x57, 0x09, 0x9a, + 0x13, 0x27, 0xfb, 0xac, 0xa7, 0x92, 0xdf, 0x9f, 0x98, 0x4a, 0xde, 0x78, 0xce, 0x26, 0x68, 0xda, + 0x5c, 0xf2, 0x9f, 0x12, 0x28, 0xcf, 0x76, 0xef, 0x0c, 0x1a, 0xbc, 0x7e, 0xba, 0xc1, 0xdb, 0x3c, + 0x9d, 0x6f, 0x45, 0x66, 0x93, 0xff, 0x95, 0x00, 0xe2, 0x26, 0x05, 0xbd, 0x07, 0x89, 0x1f, 0x45, + 0xc5, 0x35, 0x1d, 0x44, 0x28, 0x41, 0x47, 0x57, 0x60, 0x7e, 0x48, 0x28, 0x55, 0x7b, 0xe1, 0xc4, + 0x22, 0xfa, 0xcd, 0x76, 0x3b, 0x20, 0xe3, 0x90, 0x8f, 0xf6, 0x61, 0xce, 0x21, 0x2a, 0xb5, 0x4c, + 0x31, 0xb9, 0xf8, 0x94, 0xbd, 0x5a, 0x31, 0xa7, 0x1c, 0xfb, 0xf2, 0x7a, 0x91, 0xdf, 0xd4, 0x5b, + 0xe2, 0x91, 0xcb, 0x95, 0xb0, 0x80, 0x43, 0x77, 0xa0, 0x2e, 0x6c, 0x24, 0x16, 0x1c, 0x5c, 0xad, + 0x17, 0xc4, 0x6a, 0xea, 0xdb, 0x59, 0x01, 0x3c, 0xa9, 0xa3, 0x7c, 0x06, 0x95, 0xb0, 0xfe, 0xa3, + 0x06, 0x94, 0x13, 0x2f, 0xa5, 0xc0, 0x71, 0x4e, 0xc9, 0x04, 0x66, 0x26, 0x3f, 0x30, 0xca, 0xef, + 0x25, 0x78, 0x23, 0xa7, 0x0a, 0xa1, 0x0b, 0x50, 0xf2, 0x9c, 0x81, 0x08, 0xc1, 0xfc, 0xd8, 0x97, + 0x4b, 0x5f, 0xe0, 0x7b, 0x98, 0xd1, 0xd0, 0x43, 0x98, 0xa7, 0xc1, 0xfc, 0x48, 0xe4, 0xd1, 0x77, + 0x0a, 0x6d, 0x76, 0x76, 0xe6, 0xd4, 0xa9, 0xb1, 0xf0, 0x87, 0xd4, 0x10, 0x12, 0x5d, 0x86, 0x8a, + 0xa6, 0x76, 0x3c, 0x53, 0x17, 0xf3, 0xae, 0x85, 0xe0, 0x75, 0xb6, 0xb9, 0x11, 0xd0, 0x70, 0xc4, + 0xed, 0x6c, 0x3d, 0x7e, 0xda, 0x3c, 0xf7, 0xcd, 0xd3, 0xe6, 0xb9, 0x27, 0x4f, 0x9b, 0xe7, 0x7e, + 0x3a, 0x6e, 0x4a, 0x8f, 0xc7, 0x4d, 0xe9, 0x9b, 0x71, 0x53, 0x7a, 0x32, 0x6e, 0x4a, 0x7f, 0x1b, + 0x37, 0xa5, 0x5f, 0xfe, 0xbd, 0x79, 0xee, 0x07, 0xef, 0x16, 0xf8, 0x6f, 0x8c, 0xff, 0x05, 0x00, + 0x00, 0xff, 0xff, 0x1e, 0x59, 0xab, 0xd9, 0xb3, 0x21, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l 
+ i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MatchResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x3a + } + if len(m.ExcludeResourceRules) > 0 { + for iNdEx := len(m.ExcludeResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExcludeResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { @@ -434,6 +1170,20 @@ func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.ObjectSelector != nil { { size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -626,7 +1376,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *Rule) Marshal() (dAtA []byte, err error) { +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -636,46 +1386,31 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Rule) MarshalTo(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Scope != nil { - i -= len(*m.Scope) - copy(dAtA[i:], *m.Scope) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i-- - dAtA[i] = 0x22 - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.APIVersions) > 0 { - for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIVersions[iNdEx]) - copy(dAtA[i:], m.APIVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) - i-- - dAtA[i] = 0x12 + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) i-- dAtA[i] = 0xa } @@ -683,7 +1418,7 @@ func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { +func (m *ParamKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -693,39 +1428,30 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RuleWithOperations) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) i-- dAtA[i] = 0x12 - if len(m.Operations) > 0 { - for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Operations[iNdEx]) - copy(dAtA[i:], m.Operations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { +func (m *ParamRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -735,42 +1461,49 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Port != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x22 } - if m.Path != nil { - i -= len(*m.Path) - copy(dAtA[i:], *m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x1a } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 i -= len(m.Namespace) copy(dAtA[i:], m.Namespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -780,91 +1513,75 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.MatchPolicy != 
nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x22 } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) i-- - dAtA[i] = 0x42 + dAtA[i] = 0x1a } } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.APIVersions) > 0 { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa } } + return len(dAtA) - i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -873,15 +1590,64 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -891,20 +1657,20 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -912,23 +1678,13 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0xa } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -938,32 +1694,38 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + { + size, err := 
m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -975,7 +1737,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -985,490 +1747,3689 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0x1a - } - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i-- - dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *MutatingWebhook) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Rule) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if m.Scope != nil { - l = len(*m.Scope) - n += 1 + l + sovGenerated(uint64(l)) - 
} - return n + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *RuleWithOperations) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = m.Rule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) - } - return n +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhook) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - return n -} - -func (m *ValidatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *WebhookClientConfig) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) + if m.TypeChecking != nil { + { + size, err := 
m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return n + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - repeatedStringForRules += "}" - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MutatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a } - repeatedStringForWebhooks := "[]MutatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], 
m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s -} -func (this *MutatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - repeatedStringForItems := "[]MutatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 } - repeatedStringForItems += "}" - s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `Scope:` + valueToStringGenerated(this.Scope) + `,`, - `}`, - }, "") - return s -} -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingWebhook) String() string { - if this == nil { - return "nil" + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - 
repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - repeatedStringForRules += "}" - s := strings.Join([]string{`&ValidatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *ValidatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - repeatedStringForWebhooks := "[]ValidatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (this *ValidatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ValidatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - 
repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return dAtA[:n], nil } -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + 
+func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + 
sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", 
"NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(this.RuleWithOperations.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += 
"}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this 
*ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleWithOperations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RuleWithOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex]) + m.ParameterNotFoundAction = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { +func (m *ServiceReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1491,13 +5452,45 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } @@ -1529,9 +5522,112 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1558,13 +5654,64 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1591,16 +5738,15 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1610,28 +5756,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1658,71 +5804,65 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.TimeoutSeconds = &v - case 8: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1732,29 +5872,30 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1764,30 +5905,80 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1797,28 +5988,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) - m.ReinvocationPolicy = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1845,10 +6036,8 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1873,7 +6062,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1892,19 +6081,51 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1931,13 +6152,16 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1964,11 +6188,45 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, MutatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1990,7 +6248,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2013,10 +6271,10 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2081,7 +6339,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, 
MutatingWebhookConfiguration{}) + m.Items = append(m.Items, ValidatingAdmissionPolicy{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2107,7 +6365,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Rule) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2130,17 +6388,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2150,29 +6408,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2182,29 +6444,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2214,27 +6480,29 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if 
b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + m.Validations = append(m.Validations, Validation{}) + if err := m.Validations[len(m.Validations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2262,64 +6530,48 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2329,27 +6581,29 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if 
postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2376,7 +6630,8 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2401,7 +6656,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceReference) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2424,17 +6679,17 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var stringLen uint64 + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2444,29 +6699,16 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2476,62 +6718,33 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2541,12 +6754,26 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Port = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2767,9 +6994,247 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2779,50 +7244,30 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TimeoutSeconds = &v - case 8: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2832,29 +7277,81 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2864,28 +7361,28 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2912,10 +7409,8 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2940,7 +7435,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *Validation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2963,17 +7458,17 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: Validation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Validation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2983,30 +7478,29 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3016,25 +7510,88 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3057,7 +7614,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *Variable) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3080,17 +7637,17 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: Variable: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3100,30 +7657,29 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3133,25 +7689,23 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto index c23bb4bee..e856e9eaf 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -26,7 +26,181 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/admissionregistration/v1"; + +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. 
The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +message MatchResources { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. 
If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + repeated NamedRuleWithOperations resourceRules = 3; + + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + repeated NamedRuleWithOperations excludeResourceRules = 4; + + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 7; +} // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { @@ -47,6 +221,7 @@ message MutatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic repeated RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -115,7 +290,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -129,7 +304,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). @@ -154,6 +329,7 @@ message MutatingWebhook { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. + // +listType=atomic repeated string admissionReviewVersions = 8; // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -173,18 +349,39 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated MutatingWebhook Webhooks = 2; } @@ -193,23 +390,107 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. repeated MutatingWebhookConfiguration items = 2; } +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +message NamedRuleWithOperations { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + repeated string resourceNames = 1; + + // RuleWithOperations is a tuple of Operations and Resources. + optional RuleWithOperations ruleWithOperations = 2; +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +message ParamKind { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + optional string apiVersion = 1; + + // Kind is the API kind the resources belong to. + // Required. + optional string kind = 2; +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +message ParamRef { + // name is the name of the resource being referenced. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + optional string name = 1; + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. 
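Illustrative, hedged Go sketch (not part of the vendored diff): the matchConditions field added to MutatingWebhook and ValidatingWebhook earlier in this file lets a CEL expression filter requests before the webhook is called. The webhook name and expression below are invented for this example; only the vendored k8s.io/api/admissionregistration/v1 types are assumed.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	sideEffects := admissionregistrationv1.SideEffectClassNone
	// Hypothetical webhook fragment: skip the webhook for requests made by
	// kube-system service accounts, using the new matchConditions field.
	wh := admissionregistrationv1.ValidatingWebhook{
		Name:                    "example.policy.example.com", // hypothetical name
		SideEffects:             &sideEffects,
		AdmissionReviewVersions: []string{"v1"},
		MatchConditions: []admissionregistrationv1.MatchCondition{
			{
				Name:       "exclude-kube-system-serviceaccounts",
				Expression: `!(request.userInfo.username.startsWith("system:serviceaccount:kube-system:"))`,
			},
		},
	}
	fmt.Println(wh.Name, len(wh.MatchConditions))
}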
Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + optional string namespace = 2; + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + optional string parameterNotFoundAction = 4; +} + // Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended // to make sure that all the tuple expansions are valid. message Rule { // APIGroups is the API groups the resources belong to. '*' is all groups. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic repeated string apiGroups = 1; // APIVersions is the API versions the resources belong to. '*' is all versions. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. @@ -227,6 +508,7 @@ message Rule { // // Depending on the enclosing object, subresources might not be allowed. // Required. + // +listType=atomic repeated string resources = 3; // scope specifies the scope of this rule. @@ -249,6 +531,7 @@ message RuleWithOperations { // for all of those operations and any future admission operations that are added. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic repeated string operations = 1; // Rule is embedded, it describes other criteria of the rule, like @@ -278,6 +561,241 @@ message ServiceReference { optional int32 port = 4; } +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +message ValidatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. 
+ // +optional + optional ValidatingAdmissionPolicyStatus status = 3; +} + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message ValidatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + optional ValidatingAdmissionPolicyBindingSpec spec = 2; +} + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated ValidatingAdmissionPolicyBinding items = 2; +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingSpec { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. 
+ // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; +} + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated ValidatingAdmissionPolicy items = 2; +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +message ValidatingAdmissionPolicySpec { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. 
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + optional MatchResources matchConstraints = 2; + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + repeated Validation validations = 3; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated Variable variables = 7; +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. 
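The ValidatingAdmissionPolicySpec message above combines matchConstraints, validations, and an optional failurePolicy. A hedged Go sketch of a minimal policy built from the vendored v1 types; the policy name, resource rule, and CEL expression are illustrative assumptions, not taken from this repository.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	failFast := admissionregistrationv1.Fail
	policy := admissionregistrationv1.ValidatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit"}, // hypothetical policy name
		Spec: admissionregistrationv1.ValidatingAdmissionPolicySpec{
			FailurePolicy: &failFast,
			// matchConstraints selects which requests the policy evaluates.
			MatchConstraints: &admissionregistrationv1.MatchResources{
				ResourceRules: []admissionregistrationv1.NamedRuleWithOperations{{
					RuleWithOperations: admissionregistrationv1.RuleWithOperations{
						Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.Create, admissionregistrationv1.Update},
						Rule: admissionregistrationv1.Rule{
							APIGroups:   []string{"apps"},
							APIVersions: []string{"v1"},
							Resources:   []string{"deployments"},
						},
					},
				}},
			},
			// validations hold the CEL expressions applied to matching requests.
			Validations: []admissionregistrationv1.Validation{{
				Expression: "object.spec.replicas <= 5", // illustrative rule
				Message:    "replicas must be no more than 5",
			}},
		},
	}
	fmt.Println(policy.Name, len(policy.Spec.Validations))
}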
+message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; +} + // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. message ValidatingWebhook { // The name of the admission webhook. @@ -297,6 +815,7 @@ message ValidatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic repeated RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -365,7 +884,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -379,7 +898,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). @@ -404,19 +923,41 @@ message ValidatingWebhook { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. + // +listType=atomic repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated ValidatingWebhook Webhooks = 2; } @@ -425,12 +966,103 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. repeated ValidatingWebhookConfiguration items = 2; } +// Validation specifies the CEL expression which is used to apply the validation. +message Validation { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' 
escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + optional string Expression = 1; + + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + optional string message = 2; + + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. 
+ // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +message Variable { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + optional string Name = 1; + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + optional string Expression = 2; +} + // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { diff --git a/vendor/k8s.io/api/admissionregistration/v1/register.go b/vendor/k8s.io/api/admissionregistration/v1/register.go index e42a8bce3..da74379ce 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1/register.go @@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ValidatingWebhookConfigurationList{}, &MutatingWebhookConfiguration{}, &MutatingWebhookConfigurationList{}, + &ValidatingAdmissionPolicy{}, + &ValidatingAdmissionPolicyList{}, + &ValidatingAdmissionPolicyBinding{}, + &ValidatingAdmissionPolicyBindingList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go index 29873b796..4efeb2674 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -26,11 +26,13 @@ type Rule struct { // APIGroups is the API groups the resources belong to. '*' is all groups. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` // APIVersions is the API versions the resources belong to. '*' is all versions. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` // Resources is a list of resources this rule applies to. @@ -48,6 +50,7 @@ type Rule struct { // // Depending on the enclosing object, subresources might not be allowed. // Required. + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // scope specifies the scope of this rule. @@ -88,6 +91,18 @@ const ( Fail FailurePolicyType = "Fail" ) +// ParameterNotFoundActionType specifies a failure policy that defines how a binding +// is evaluated when the param referred by its perNamespaceParamRef is not found. +type ParameterNotFoundActionType string + +const ( + // Allow means all requests will be admitted if no param resources + // could be found. + AllowAction ParameterNotFoundActionType = "Allow" + // Deny means all requests will be denied if no param resources are found. + DenyAction ParameterNotFoundActionType = "Deny" +) + // MatchPolicyType specifies the type of match policy. 
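Because register.go above now adds the four ValidatingAdmissionPolicy kinds to the package scheme, code that builds a runtime.Scheme from this vendored package can instantiate them by GroupVersionKind. A hedged sketch, assuming only the exported AddToScheme and SchemeGroupVersion of this package:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	s := runtime.NewScheme()
	// AddToScheme runs addKnownTypes shown above, registering the new policy kinds.
	if err := admissionregistrationv1.AddToScheme(s); err != nil {
		panic(err)
	}
	gvk := admissionregistrationv1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy")
	obj, err := s.New(gvk)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj) // *v1.ValidatingAdmissionPolicy
}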
// +enum type MatchPolicyType string @@ -117,9 +132,590 @@ const ( SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +type ValidatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// ValidatingAdmissionPolicyConditionType is the condition type of admission validation policy. +type ValidatingAdmissionPolicyConditionType string + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. 
+type ValidatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +type ValidatingAdmissionPolicySpec struct { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. 
An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +type ParamKind struct { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"` + + // Kind is the API kind the resources belong to. + // Required. + Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"` +} + +// Validation specifies the CEL expression which is used to apply the validation. +type Validation struct { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. 
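The paramKind plumbing described above is what makes the `params` CEL variable available to validation expressions: the policy names a parameter kind, and a binding later supplies a concrete object. A hedged fragment using the vendored Go types; the ConfigMap parameter kind and the expression are assumptions for this sketch only.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	spec := admissionregistrationv1.ValidatingAdmissionPolicySpec{
		// paramKind names the resource type that bindings may reference as `params`.
		ParamKind: &admissionregistrationv1.ParamKind{
			APIVersion: "v1",
			Kind:       "ConfigMap", // assumed parameter kind for this sketch
		},
		Validations: []admissionregistrationv1.Validation{{
			// `params` is only populated when a binding supplies a paramRef.
			Expression: "object.spec.replicas <= int(params.data.maxReplicas)", // illustrative
		}},
	}
	fmt.Println(spec.ParamKind.Kind, len(spec.Validations))
}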
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"` + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. 
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +type Variable struct { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + Name string `json:"name" protobuf:"bytes,1,opt,name=Name"` + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. 
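The Variable and Validation types above support composition: an expression defined in spec.variables becomes available as `variables.<name>` in later expressions, and messageExpression can build the failure message dynamically. A hedged sketch with invented variable and expression content:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	spec := admissionregistrationv1.ValidatingAdmissionPolicySpec{
		// A named CEL expression, usable as `variables.containers` below.
		Variables: []admissionregistrationv1.Variable{{
			Name:       "containers",
			Expression: "object.spec.template.spec.containers",
		}},
		Validations: []admissionregistrationv1.Validation{{
			Expression:        "variables.containers.all(c, !c.image.endsWith(':latest'))", // illustrative
			MessageExpression: "'image must not use the :latest tag in ' + object.metadata.name",
		}},
	}
	fmt.Println(len(spec.Variables), len(spec.Validations))
}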
+ // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type ValidatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingSpec struct { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // MatchResources declares what resources match this binding and will be validated by it. 
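A small illustrative sketch of an audit annotation, not taken from the diff; the key and expression are hypothetical, and the string-or-null pattern follows the valueExpression documentation above.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	// At audit time the key is prefixed with the policy name, e.g.
	// "replica-limit.example.com/high-replica-count"; a null or empty
	// value causes the annotation to be omitted from the audit event.
	annotation := admissionregistrationv1.AuditAnnotation{
		Key:             "high-replica-count",
		ValueExpression: "object.spec.replicas > 50 ? 'replicas=' + string(object.spec.replicas) : null",
	}
	fmt.Println(annotation.Key)
}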
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +type ParamRef struct { + // name is the name of the resource being referenced. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. 
+ // + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + // + Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"` + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"` + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"` +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +type MatchResources struct { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. 
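Putting the binding fields together, an illustrative sketch (policy, namespace, and label names are hypothetical) of a ValidatingAdmissionPolicyBinding that selects its parameters by label and both denies and audits failures:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// With no matching parameter object, fall back to denying the request.
	notFound := admissionregistrationv1.ParameterNotFoundActionType("Deny")

	binding := admissionregistrationv1.ValidatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit-binding"},
		Spec: admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec{
			PolicyName: "replica-limit.example.com",
			ParamRef: &admissionregistrationv1.ParamRef{
				// selector and namespace instead of name: all labelled params
				// in "policy-params" are evaluated and the results ANDed.
				Namespace:               "policy-params",
				Selector:                &metav1.LabelSelector{MatchLabels: map[string]string{"env": "prod"}},
				ParameterNotFoundAction: &notFound,
			},
			// Failures are both rejected and recorded in the audit event.
			ValidationActions: []admissionregistrationv1.ValidationAction{
				admissionregistrationv1.Deny,
				admissionregistrationv1.Audit,
			},
		},
	}
	fmt.Println(binding.Name)
}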
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"` + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"` + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"` + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` +} + +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +type NamedRuleWithOperations struct { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"` + // RuleWithOperations is a tuple of Operations and Resources. + RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. type ValidatingWebhookConfiguration struct { @@ -131,10 +727,13 @@ type ValidatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. type ValidatingWebhookConfigurationList struct { @@ -150,6 +749,7 @@ type ValidatingWebhookConfigurationList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. 
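An illustrative sketch of the match semantics above; the group/version/resource choices mirror the matchPolicy example in the documentation, everything else is hypothetical.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	equivalent := admissionregistrationv1.Equivalent

	match := admissionregistrationv1.MatchResources{
		// Skip namespaces labelled runlevel=0 or runlevel=1, as in the
		// namespaceSelector example above.
		NamespaceSelector: &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{{
				Key:      "runlevel",
				Operator: metav1.LabelSelectorOpNotIn,
				Values:   []string{"0", "1"},
			}},
		},
		// CREATE/UPDATE of apps/v1 deployments; with Equivalent matching,
		// requests via other groups/versions of the same resource also match.
		ResourceRules: []admissionregistrationv1.NamedRuleWithOperations{{
			RuleWithOperations: admissionregistrationv1.RuleWithOperations{
				Operations: []admissionregistrationv1.OperationType{
					admissionregistrationv1.Create,
					admissionregistrationv1.Update,
				},
				Rule: admissionregistrationv1.Rule{
					APIGroups:   []string{"apps"},
					APIVersions: []string{"v1"},
					Resources:   []string{"deployments"},
				},
			},
		}},
		MatchPolicy: &equivalent,
	}
	fmt.Println(*match.MatchPolicy)
}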
type MutatingWebhookConfiguration struct { @@ -161,10 +761,13 @@ type MutatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. type MutatingWebhookConfigurationList struct { @@ -196,6 +799,7 @@ type ValidatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -303,7 +907,27 @@ type ValidatingWebhook struct { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. + // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,opt,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -325,6 +949,7 @@ type MutatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -432,6 +1057,7 @@ type MutatingWebhook struct { // If a persisted webhook configuration specifies allowed versions and does not // include any versions known to the API Server, calls to the webhook will fail // and be subject to the failure policy. 
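To make the matchConditions ordering rules concrete, an illustrative sketch of a ValidatingWebhook that skips requests made by nodes; the webhook name and CEL expression are hypothetical and do not come from kube-mgmt.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	fail := admissionregistrationv1.Fail
	none := admissionregistrationv1.SideEffectClassNone

	webhook := admissionregistrationv1.ValidatingWebhook{
		Name:                    "images.example.com",
		SideEffects:             &none,
		FailurePolicy:           &fail,
		AdmissionReviewVersions: []string{"v1"},
		// If this expression is FALSE the webhook is skipped; if it errors,
		// the failurePolicy (here Fail) decides whether to reject the request.
		MatchConditions: []admissionregistrationv1.MatchCondition{{
			Name:       "exclude-kubelet-requests",
			Expression: "!('system:nodes' in request.userInfo.groups)",
		}},
	}
	fmt.Println(webhook.Name)
}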
+ // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"` // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -451,6 +1077,25 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -474,6 +1119,7 @@ type RuleWithOperations struct { // for all of those operations and any future admission operations that are added. // If '*' is present, the length of the slice must be one. // Required. + // +listType=atomic Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` // Rule is embedded, it describes other criteria of the rule, like // APIGroups, APIVersions, Resources, etc. @@ -559,3 +1205,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go index ba92729c3..f43139505 100644 --- a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go @@ -24,9 +24,52 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. 
Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + +var map_MatchResources = map[string]string{ + "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. 
objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", +} + +func (MatchResources) SwaggerDoc() map[string]string { + return map_MatchResources +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +83,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -66,6 +110,37 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_MutatingWebhookConfigurationList } +var map_NamedRuleWithOperations = map[string]string{ + "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", +} + +func (NamedRuleWithOperations) SwaggerDoc() map[string]string { + return map_NamedRuleWithOperations +} + +var map_ParamKind = map[string]string{ + "": "ParamKind is a tuple of Group Kind and Version.", + "apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "kind": "Kind is the API kind the resources belong to. Required.", +} + +func (ParamKind) SwaggerDoc() map[string]string { + return map_ParamKind +} + +var map_ParamRef = map[string]string{ + "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "name": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", + "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. 
Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", +} + +func (ParamRef) SwaggerDoc() map[string]string { + return map_ParamRef +} + var map_Rule = map[string]string{ "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", @@ -99,6 +174,94 @@ func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + +var map_ValidatingAdmissionPolicy = map[string]string{ + "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", +} + +func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicy +} + +var map_ValidatingAdmissionPolicyBinding = map[string]string{ + "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. 
Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", +} + +func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBinding +} + +var map_ValidatingAdmissionPolicyBindingList = map[string]string{ + "": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingList +} + +var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. 
Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", +} + +func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingSpec +} + +var map_ValidatingAdmissionPolicyList = map[string]string{ + "": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyList +} + +var map_ValidatingAdmissionPolicySpec = map[string]string{ + "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. 
A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", +} + +func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicySpec +} + +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + var map_ValidatingWebhook = map[string]string{ "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -111,6 +274,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. 
Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.",
+ "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped",
}
func (ValidatingWebhook) SwaggerDoc() map[string]string {
@@ -137,6 +301,28 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string {
return map_ValidatingWebhookConfigurationList
}
+var map_Validation = map[string]string{
+ "": "Validation specifies the CEL expression which is used to apply the validation.",
+ "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.",
+ "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".",
+ "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.",
+ "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"",
+}
+
+func (Validation) SwaggerDoc() map[string]string {
+ return map_Validation
+}
+
+var map_Variable = map[string]string{
+ "": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.",
+ "name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`",
+ "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.",
+}
+
+func (Variable) SwaggerDoc() map[string]string {
+ return map_Variable
+}
+
var map_WebhookClientConfig = map[string]string{
"": "WebhookClientConfig contains the information to make a TLS connection with the webhook",
"url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go
index cff7377a5..bfe599c1d 100644
--- a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go
@@ -26,6 +26,99 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation.
+func (in *AuditAnnotation) DeepCopy() *AuditAnnotation {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditAnnotation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning.
+func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchResources) DeepCopyInto(out *MatchResources) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludeResourceRules != nil { + in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources. +func (in *MatchResources) DeepCopy() *MatchResources { + if in == nil { + return nil + } + out := new(MatchResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in @@ -77,6 +170,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -156,6 +254,70 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { + *out = *in + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations. +func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations { + if in == nil { + return nil + } + out := new(NamedRuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParamKind) DeepCopyInto(out *ParamKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind. +func (in *ParamKind) DeepCopy() *ParamKind { + if in == nil { + return nil + } + out := new(ParamKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamRef) DeepCopyInto(out *ParamRef) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ParameterNotFoundAction != nil { + in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction + *out = new(ParameterNotFoundActionType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef. +func (in *ParamRef) DeepCopy() *ParamRef { + if in == nil { + return nil + } + out := new(ParamRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Rule) DeepCopyInto(out *Rule) { *out = *in @@ -240,6 +402,260 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. +func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy. +func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding. 
+func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList. +func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Validations != nil { + in, out := &in.Validations, &out.Validations + *out = make([]Validation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec. +func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = *in @@ -286,6 +702,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -365,6 +786,43 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Validation) DeepCopyInto(out *Validation) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(metav1.StatusReason) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation. +func (in *Validation) DeepCopy() *Validation { + if in == nil { + return nil + } + out := new(Validation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. +func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..0862bb1f2 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go new file mode 100644 index 000000000..385c60e0d --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go new file mode 100644 index 000000000..111cc7287 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -0,0 +1,4633 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/admissionregistration/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{2} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m *MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + +func (m *MatchResources) Reset() { *m = MatchResources{} } +func (*MatchResources) 
ProtoMessage() {} +func (*MatchResources) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{3} +} +func (m *MatchResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchResources.Merge(m, src) +} +func (m *MatchResources) XXX_Size() int { + return m.Size() +} +func (m *MatchResources) XXX_DiscardUnknown() { + xxx_messageInfo_MatchResources.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchResources proto.InternalMessageInfo + +func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } +func (*NamedRuleWithOperations) ProtoMessage() {} +func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{4} +} +func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRuleWithOperations.Merge(m, src) +} +func (m *NamedRuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *NamedRuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo + +func (m *ParamKind) Reset() { *m = ParamKind{} } +func (*ParamKind) ProtoMessage() {} +func (*ParamKind) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{5} +} +func (m *ParamKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamKind.Merge(m, src) +} +func (m *ParamKind) XXX_Size() int { + return m.Size() +} +func (m *ParamKind) XXX_DiscardUnknown() { + xxx_messageInfo_ParamKind.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamKind proto.InternalMessageInfo + +func (m *ParamRef) Reset() { *m = ParamRef{} } +func (*ParamRef) ProtoMessage() {} +func (*ParamRef) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{6} +} +func (m *ParamRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamRef.Merge(m, src) +} +func (m *ParamRef) XXX_Size() int { + return m.Size() +} +func (m *ParamRef) XXX_DiscardUnknown() { + xxx_messageInfo_ParamRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamRef proto.InternalMessageInfo + +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{7} +} +func (m *TypeChecking) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } +func (*ValidatingAdmissionPolicy) ProtoMessage() {} +func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{8} +} +func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src) +} +func (m *ValidatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } +func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{9} +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } +func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{10} +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + 
xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } +func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{11} +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } +func (*ValidatingAdmissionPolicyList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{12} +} +func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } +func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{13} +} +func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2c49182728ae0af5, []int{14} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + +func (m *Validation) Reset() { *m = Validation{} } +func (*Validation) ProtoMessage() {} +func (*Validation) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{15} +} +func (m *Validation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validation.Merge(m, src) +} +func (m *Validation) XXX_Size() int { + return m.Size() +} +func (m *Validation) XXX_DiscardUnknown() { + xxx_messageInfo_Validation.DiscardUnknown(m) +} + +var xxx_messageInfo_Validation proto.InternalMessageInfo + +func (m *Variable) Reset() { *m = Variable{} } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{16} +} +func (m *Variable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Variable) XXX_Merge(src proto.Message) { + xxx_messageInfo_Variable.Merge(m, src) +} +func (m *Variable) XXX_Size() int { + return m.Size() +} +func (m *Variable) XXX_DiscardUnknown() { + xxx_messageInfo_Variable.DiscardUnknown(m) +} + +var xxx_messageInfo_Variable proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition") + proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources") + proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations") + proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind") + proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef") + proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1alpha1.TypeChecking") + proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy") + proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding") + 
proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList") + proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec") + proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList") + proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus") + proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Validation") + proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1alpha1.Variable") +} + +func init() { + proto.RegisterFile("k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptor_2c49182728ae0af5) +} + +var fileDescriptor_2c49182728ae0af5 = []byte{ + // 1498 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0x73, 0xb1, 0xe7, 0xdf, 0x2a, 0x6e, 0xfe, 0xd4, 0x1b, 0xad, + 0x2a, 0xd4, 0x48, 0xb0, 0x26, 0x69, 0xa1, 0xb4, 0x42, 0x42, 0xd9, 0xde, 0xe8, 0x25, 0x17, 0x4d, + 0x51, 0x22, 0x21, 0x90, 0x98, 0xec, 0x4e, 0xec, 0x69, 0xbc, 0x17, 0x76, 0xd6, 0xa1, 0x11, 0x48, + 0x54, 0xe2, 0x05, 0xde, 0x78, 0xe0, 0x85, 0x57, 0x3e, 0x02, 0xdf, 0x80, 0xb7, 0x3e, 0xf6, 0xb1, + 0x3c, 0x60, 0x51, 0xf3, 0xc2, 0x27, 0x00, 0x29, 0x2f, 0xa0, 0x99, 0x9d, 0xbd, 0xda, 0x26, 0x76, + 0x09, 0xbc, 0x79, 0xce, 0x9c, 0xf3, 0xfb, 0xcd, 0x39, 0x73, 0xce, 0xd9, 0x33, 0x06, 0xd7, 0x0e, + 0xde, 0x66, 0x3a, 0x75, 0x1b, 0xd8, 0xa3, 0x0d, 0x6c, 0xd9, 0x94, 0x31, 0xea, 0x3a, 0x3e, 0x69, + 0x52, 0x16, 0xf8, 0x38, 0xa0, 0xae, 0xd3, 0x38, 0x5c, 0xc5, 0x6d, 0xaf, 0x85, 0x57, 0x1b, 0x4d, + 0xe2, 0x10, 0x1f, 0x07, 0xc4, 0xd2, 0x3d, 0xdf, 0x0d, 0x5c, 0xb8, 0x12, 0x9a, 0xea, 0xd8, 0xa3, + 0xfa, 0x40, 0x53, 0x3d, 0x32, 0x5d, 0x7a, 0xbd, 0x49, 0x83, 0x56, 0x67, 0x4f, 0x37, 0x5d, 0xbb, + 0xd1, 0x74, 0x9b, 0x6e, 0x43, 0x20, 0xec, 0x75, 0xf6, 0xc5, 0x4a, 0x2c, 0xc4, 0xaf, 0x10, 0x79, + 0xe9, 0xf2, 0x08, 0x87, 0xca, 0x1f, 0x67, 0xe9, 0x4a, 0x62, 0x64, 0x63, 0xb3, 0x45, 0x1d, 0xe2, + 0x1f, 0x35, 0xbc, 0x83, 0x26, 0x17, 0xb0, 0x86, 0x4d, 0x02, 0x3c, 0xc8, 0xaa, 0x31, 0xcc, 0xca, + 0xef, 0x38, 0x01, 0xb5, 0x49, 0x9f, 0xc1, 0x5b, 0x27, 0x19, 0x30, 0xb3, 0x45, 0x6c, 0x9c, 0xb7, + 0xd3, 0x18, 0x58, 0x58, 0xef, 0x58, 0x34, 0x58, 0x77, 0x1c, 0x37, 0x10, 0x4e, 0xc0, 0x0b, 0xa0, + 0x70, 0x40, 0x8e, 0x6a, 0xca, 0xb2, 0x72, 0xa9, 0x64, 0x94, 0x9f, 0x76, 0xd5, 0x89, 0x5e, 0x57, + 0x2d, 0xdc, 0x27, 0x47, 0x88, 0xcb, 0xe1, 0x3a, 0x58, 0x38, 0xc4, 0xed, 0x0e, 0xb9, 0xf5, 0xd8, + 0xf3, 0x89, 0x08, 0x41, 0x6d, 0x52, 0xa8, 0x2e, 0x4a, 0xd5, 0x85, 0x9d, 0xec, 0x36, 0xca, 0xeb, + 0x6b, 0x6d, 0x50, 0x4d, 0x56, 0xbb, 0xd8, 0x77, 0xa8, 0xd3, 0x84, 0xaf, 0x81, 0x99, 0x7d, 0x4a, + 0xda, 0x16, 0x22, 0xfb, 0x12, 0xb0, 0x22, 0x01, 0x67, 0x6e, 0x4b, 0x39, 0x8a, 0x35, 0xe0, 0x0a, + 0x98, 0xfe, 0x34, 0x34, 0xac, 0x15, 0x84, 0xf2, 0x82, 0x54, 0x9e, 0x96, 0x78, 0x28, 0xda, 0xd7, + 0xf6, 0xc1, 0xfc, 0x06, 0x0e, 0xcc, 0xd6, 0x0d, 0xd7, 0xb1, 0xa8, 0xf0, 0x70, 0x19, 0x14, 0x1d, + 0x6c, 0x13, 0xe9, 0xe2, 0xac, 0xb4, 0x2c, 0x6e, 0x62, 0x9b, 0x20, 0xb1, 0x03, 0xd7, 0x00, 0x20, + 0x79, 0xff, 0xa0, 0xd4, 0x03, 0x29, 0xd7, 0x52, 0x5a, 0xda, 0x4f, 
0x45, 0x49, 0x84, 0x08, 0x73, + 0x3b, 0xbe, 0x49, 0x18, 0x7c, 0x0c, 0xaa, 0x1c, 0x8e, 0x79, 0xd8, 0x24, 0x0f, 0x49, 0x9b, 0x98, + 0x81, 0xeb, 0x0b, 0xd6, 0xf2, 0xda, 0x65, 0x3d, 0xc9, 0xd3, 0xf8, 0xc6, 0x74, 0xef, 0xa0, 0xc9, + 0x05, 0x4c, 0xe7, 0x89, 0xa1, 0x1f, 0xae, 0xea, 0x0f, 0xf0, 0x1e, 0x69, 0x47, 0xa6, 0xc6, 0xb9, + 0x5e, 0x57, 0xad, 0x6e, 0xe6, 0x11, 0x51, 0x3f, 0x09, 0x74, 0xc1, 0xbc, 0xbb, 0xf7, 0x88, 0x98, + 0x41, 0x4c, 0x3b, 0xf9, 0xf2, 0xb4, 0xb0, 0xd7, 0x55, 0xe7, 0xb7, 0x32, 0x70, 0x28, 0x07, 0x0f, + 0xbf, 0x00, 0x73, 0xbe, 0xf4, 0x1b, 0x75, 0xda, 0x84, 0xd5, 0x0a, 0xcb, 0x85, 0x4b, 0xe5, 0x35, + 0x43, 0x1f, 0xb9, 0x1c, 0x75, 0xee, 0x98, 0xc5, 0x8d, 0x77, 0x69, 0xd0, 0xda, 0xf2, 0x48, 0xb8, + 0xcf, 0x8c, 0x73, 0x32, 0xf0, 0x73, 0x28, 0x4d, 0x80, 0xb2, 0x7c, 0xf0, 0x5b, 0x05, 0x9c, 0x25, + 0x8f, 0xcd, 0x76, 0xc7, 0x22, 0x19, 0xbd, 0x5a, 0xf1, 0xd4, 0x0e, 0xf2, 0x8a, 0x3c, 0xc8, 0xd9, + 0x5b, 0x03, 0x78, 0xd0, 0x40, 0x76, 0x78, 0x13, 0x94, 0x6d, 0x9e, 0x14, 0xdb, 0x6e, 0x9b, 0x9a, + 0x47, 0xb5, 0x69, 0x91, 0x4a, 0x5a, 0xaf, 0xab, 0x96, 0x37, 0x12, 0xf1, 0x71, 0x57, 0x5d, 0x48, + 0x2d, 0xdf, 0x3f, 0xf2, 0x08, 0x4a, 0x9b, 0x69, 0xcf, 0x15, 0xb0, 0x38, 0xe4, 0x54, 0xf0, 0x6a, + 0x12, 0x79, 0x91, 0x1a, 0x35, 0x65, 0xb9, 0x70, 0xa9, 0x64, 0x54, 0xd3, 0x11, 0x13, 0x1b, 0x28, + 0xab, 0x07, 0xbf, 0x54, 0x00, 0xf4, 0xfb, 0xf0, 0x64, 0xa2, 0x5c, 0x1d, 0x25, 0x5e, 0xfa, 0x80, + 0x20, 0x2d, 0xc9, 0x20, 0xc1, 0xfe, 0x3d, 0x34, 0x80, 0x4e, 0xc3, 0xa0, 0xb4, 0x8d, 0x7d, 0x6c, + 0xdf, 0xa7, 0x8e, 0xc5, 0xeb, 0x0e, 0x7b, 0x74, 0x87, 0xf8, 0xa2, 0xee, 0x94, 0x6c, 0xdd, 0xad, + 0x6f, 0xdf, 0x95, 0x3b, 0x28, 0xa5, 0xc5, 0xab, 0xf9, 0x80, 0x3a, 0x96, 0xac, 0xd2, 0xb8, 0x9a, + 0x39, 0x1e, 0x12, 0x3b, 0xda, 0x0f, 0x93, 0x60, 0x46, 0x70, 0xf0, 0xce, 0x71, 0x72, 0xf1, 0x37, + 0x40, 0x29, 0x2e, 0x28, 0x89, 0x5a, 0x95, 0x6a, 0xa5, 0xb8, 0xf8, 0x50, 0xa2, 0x03, 0x3f, 0x02, + 0x33, 0x2c, 0x2a, 0xb3, 0xc2, 0xcb, 0x97, 0xd9, 0x2c, 0xef, 0x75, 0x71, 0x81, 0xc5, 0x90, 0x30, + 0x00, 0x8b, 0x1e, 0x3f, 0x3d, 0x09, 0x88, 0xbf, 0xe9, 0x06, 0xb7, 0xdd, 0x8e, 0x63, 0xad, 0x9b, + 0x3c, 0x7a, 0xb5, 0xa2, 0x38, 0xdd, 0xf5, 0x5e, 0x57, 0x5d, 0xdc, 0x1e, 0xac, 0x72, 0xdc, 0x55, + 0xff, 0x3f, 0x64, 0x4b, 0xa4, 0xd9, 0x30, 0x68, 0xed, 0x3b, 0x05, 0xcc, 0x72, 0x8d, 0x1b, 0x2d, + 0x62, 0x1e, 0xf0, 0x06, 0xfd, 0x95, 0x02, 0x20, 0xc9, 0xb7, 0xed, 0x30, 0xdb, 0xca, 0x6b, 0xef, + 0x8c, 0x51, 0x5e, 0x7d, 0xbd, 0x3f, 0xc9, 0x99, 0xbe, 0x2d, 0x86, 0x06, 0x70, 0x6a, 0x3f, 0x4f, + 0x82, 0xf3, 0x3b, 0xb8, 0x4d, 0x2d, 0x1c, 0x50, 0xa7, 0xb9, 0x1e, 0xd1, 0x85, 0xc5, 0x02, 0x3f, + 0x06, 0x33, 0x3c, 0xc0, 0x16, 0x0e, 0xb0, 0x6c, 0xb6, 0x6f, 0x8c, 0x76, 0x1d, 0x61, 0x8b, 0xdb, + 0x20, 0x01, 0x4e, 0x92, 0x2e, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x02, 0x45, 0xe6, 0x11, 0x53, 0x96, + 0xca, 0x7b, 0x63, 0xf8, 0x3e, 0xf4, 0xd4, 0x0f, 0x3d, 0x62, 0x26, 0xd9, 0xc8, 0x57, 0x48, 0x70, + 0x40, 0x1f, 0x4c, 0xb1, 0x00, 0x07, 0x1d, 0x26, 0x53, 0xeb, 0xde, 0xa9, 0xb0, 0x09, 0x44, 0x63, + 0x5e, 0xf2, 0x4d, 0x85, 0x6b, 0x24, 0x99, 0xb4, 0x3f, 0x14, 0xb0, 0x3c, 0xd4, 0xd6, 0xa0, 0x8e, + 0xc5, 0xf3, 0xe1, 0xdf, 0x0f, 0xf3, 0x27, 0x99, 0x30, 0x6f, 0x9d, 0x86, 0xe3, 0xf2, 0xf0, 0xc3, + 0xa2, 0xad, 0xfd, 0xae, 0x80, 0x8b, 0x27, 0x19, 0x3f, 0xa0, 0x2c, 0x80, 0x1f, 0xf6, 0x79, 0xaf, + 0x8f, 0x58, 0xf3, 0x94, 0x85, 0xbe, 0xc7, 0xe3, 0x4d, 0x24, 0x49, 0x79, 0xee, 0x81, 0x33, 0x34, + 0x20, 0x36, 0x6f, 0xc6, 0xbc, 0xba, 0xee, 0x9f, 0xa2, 0xeb, 0xc6, 0x9c, 0xe4, 0x3d, 0x73, 0x97, + 0x33, 0xa0, 0x90, 0x48, 0xfb, 0xba, 0x70, 0xb2, 0xe3, 0x3c, 0x4e, 0xbc, 0x45, 0x7b, 0x42, 
0xb8, + 0x99, 0x74, 0xd1, 0xf8, 0x1a, 0xb7, 0xe3, 0x1d, 0x94, 0xd2, 0xe2, 0x0d, 0xd2, 0x93, 0xfd, 0x77, + 0xc0, 0x1c, 0x72, 0x92, 0x47, 0x51, 0xeb, 0x0e, 0x1b, 0x64, 0xb4, 0x42, 0x31, 0x24, 0xec, 0x80, + 0x79, 0x3b, 0x33, 0x78, 0xc9, 0x52, 0xb9, 0x36, 0x06, 0x49, 0x76, 0x72, 0x0b, 0x47, 0x9e, 0xac, + 0x0c, 0xe5, 0x48, 0xe0, 0x2e, 0xa8, 0x1e, 0xca, 0x88, 0xb9, 0x4e, 0xd8, 0x35, 0xc3, 0x69, 0xa3, + 0x64, 0xac, 0xf0, 0x41, 0x6d, 0x27, 0xbf, 0x79, 0xdc, 0x55, 0x2b, 0x79, 0x21, 0xea, 0xc7, 0xd0, + 0x7e, 0x53, 0xc0, 0x85, 0xa1, 0x77, 0xf1, 0x1f, 0x64, 0x1f, 0xcd, 0x66, 0xdf, 0xcd, 0x53, 0xc9, + 0xbe, 0xc1, 0x69, 0xf7, 0xfd, 0xd4, 0xdf, 0xb8, 0x2a, 0xf2, 0x0d, 0x83, 0x92, 0x17, 0xcd, 0x07, + 0xd2, 0xd7, 0x2b, 0xe3, 0x26, 0x0f, 0xb7, 0x35, 0xe6, 0xf8, 0xf7, 0x3b, 0x5e, 0xa2, 0x04, 0x15, + 0x7e, 0x06, 0x2a, 0xb6, 0x7c, 0x21, 0x70, 0x00, 0xea, 0x04, 0xd1, 0x14, 0xf4, 0x0f, 0x32, 0xe8, + 0x6c, 0xaf, 0xab, 0x56, 0x36, 0x72, 0xb0, 0xa8, 0x8f, 0x08, 0xb6, 0x41, 0x39, 0xc9, 0x80, 0x68, + 0x6c, 0x7e, 0xf3, 0x25, 0x42, 0xee, 0x3a, 0xc6, 0xff, 0x64, 0x8c, 0xcb, 0x89, 0x8c, 0xa1, 0x34, + 0x3c, 0x7c, 0x00, 0xe6, 0xf6, 0x31, 0x6d, 0x77, 0x7c, 0x22, 0x07, 0xd2, 0x70, 0x82, 0x78, 0x95, + 0x0f, 0x8b, 0xb7, 0xd3, 0x1b, 0xc7, 0x5d, 0xb5, 0x9a, 0x11, 0x88, 0x69, 0x21, 0x6b, 0x0c, 0x9f, + 0x28, 0xa0, 0x82, 0xb3, 0xcf, 0x47, 0x56, 0x3b, 0x23, 0x3c, 0xb8, 0x3e, 0x86, 0x07, 0xb9, 0x17, + 0xa8, 0x51, 0x93, 0x6e, 0x54, 0x72, 0x1b, 0x0c, 0xf5, 0xb1, 0xc1, 0xcf, 0xc1, 0x82, 0x9d, 0x79, + 0xdd, 0xb1, 0xda, 0x94, 0x38, 0xc0, 0xd8, 0x57, 0x17, 0x23, 0x24, 0x2f, 0xd9, 0xac, 0x9c, 0xa1, + 0x3c, 0x15, 0xb4, 0x40, 0xe9, 0x10, 0xfb, 0x14, 0xef, 0xf1, 0x87, 0xc6, 0xb4, 0xe0, 0xbd, 0x3c, + 0xd6, 0xd5, 0x85, 0xb6, 0xc9, 0x7c, 0x19, 0x49, 0x18, 0x4a, 0x80, 0xb5, 0x1f, 0x27, 0x81, 0x7a, + 0xc2, 0xa7, 0x1c, 0xde, 0x03, 0xd0, 0xdd, 0x63, 0xc4, 0x3f, 0x24, 0xd6, 0x9d, 0xf0, 0x8d, 0x1f, + 0x4d, 0xd0, 0x85, 0x64, 0xbc, 0xda, 0xea, 0xd3, 0x40, 0x03, 0xac, 0xa0, 0x0d, 0x66, 0x83, 0xd4, + 0xe4, 0x37, 0xce, 0x8b, 0x40, 0x3a, 0x96, 0x1e, 0x1c, 0x8d, 0x4a, 0xaf, 0xab, 0x66, 0x46, 0x49, + 0x94, 0x81, 0x87, 0x26, 0x00, 0x66, 0x72, 0x7b, 0x61, 0x01, 0x34, 0x46, 0x6b, 0x67, 0xc9, 0x9d, + 0xc5, 0x9f, 0xa0, 0xd4, 0x75, 0xa5, 0x60, 0xb5, 0x3f, 0x15, 0x00, 0x92, 0xaa, 0x80, 0x17, 0x41, + 0xea, 0x19, 0x2f, 0xbf, 0x62, 0x45, 0x0e, 0x81, 0x52, 0x72, 0xb8, 0x02, 0xa6, 0x6d, 0xc2, 0x18, + 0x6e, 0x46, 0xef, 0x80, 0xf8, 0x5f, 0x86, 0x8d, 0x50, 0x8c, 0xa2, 0x7d, 0xb8, 0x0b, 0xa6, 0x7c, + 0x82, 0x99, 0xeb, 0xc8, 0xff, 0x23, 0xde, 0xe5, 0x63, 0x15, 0x12, 0x92, 0xe3, 0xae, 0xba, 0x3a, + 0xca, 0xbf, 0x40, 0xba, 0x9c, 0xc2, 0x84, 0x11, 0x92, 0x70, 0xf0, 0x0e, 0xa8, 0x4a, 0x8e, 0xd4, + 0x81, 0xc3, 0xaa, 0x3d, 0x2f, 0x4f, 0x53, 0xdd, 0xc8, 0x2b, 0xa0, 0x7e, 0x1b, 0xed, 0x1e, 0x98, + 0x89, 0xb2, 0x0b, 0xd6, 0x40, 0x31, 0xf5, 0xf9, 0x0e, 0x1d, 0x17, 0x92, 0x5c, 0x60, 0x26, 0x07, + 0x07, 0xc6, 0xd8, 0x7a, 0xfa, 0xa2, 0x3e, 0xf1, 0xec, 0x45, 0x7d, 0xe2, 0xf9, 0x8b, 0xfa, 0xc4, + 0x93, 0x5e, 0x5d, 0x79, 0xda, 0xab, 0x2b, 0xcf, 0x7a, 0x75, 0xe5, 0x79, 0xaf, 0xae, 0xfc, 0xd2, + 0xab, 0x2b, 0xdf, 0xfc, 0x5a, 0x9f, 0xf8, 0x60, 0x65, 0xe4, 0x7f, 0xf1, 0xfe, 0x0a, 0x00, 0x00, + 0xff, 0xff, 0x22, 0xbd, 0xc5, 0xc7, 0xf1, 0x13, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MatchResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x3a + } + if len(m.ExcludeResourceRules) > 0 { + for iNdEx := len(m.ExcludeResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExcludeResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ParamKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ParamRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i-- + dAtA[i] = 0x22 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + 
if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + 
copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeChecking) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 
+ l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + 
strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m 
*AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleWithOperations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.RuleWithOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex]) + m.ParameterNotFoundAction = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ValidatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingAdmissionPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validations = append(m.Validations, Validation{}) + if err := m.Validations[len(m.Validations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := 
m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} + } + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Variable) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Variable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto new file mode 100644 index 000000000..d5974d5ec --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -0,0 +1,609 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.admissionregistration.v1alpha1; + +import "k8s.io/api/admissionregistration/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/admissionregistration/v1alpha1"; + +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. 
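For reference, every generated Unmarshal method and the skipGenerated helper in the file above repeat the same protobuf varint decode loop: each byte contributes its low 7 bits, a byte below 0x80 terminates the value, and the loop bails out on 64-bit overflow or truncated input. A minimal standalone Go sketch of that loop follows; the function and variable names are illustrative and not part of the vendored file.

package main

import (
	"errors"
	"fmt"
)

var (
	errIntOverflow   = errors.New("varint overflows a 64-bit integer")
	errUnexpectedEOF = errors.New("unexpected end of input")
)

// decodeVarint mirrors the loop emitted by gogo/protobuf: accumulate the
// low 7 bits of each byte until a byte without the continuation bit is seen.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if n >= len(data) {
			return 0, 0, errUnexpectedEOF
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | (0x02 << 7) = 44 + 256 = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}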
+ // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. +message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. 
+// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +message MatchResources { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + repeated NamedRuleWithOperations resourceRules = 3; + + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + repeated NamedRuleWithOperations excludeResourceRules = 4; + + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. 
+ // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 7; +} + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +message NamedRuleWithOperations { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + repeated string resourceNames = 1; + + // RuleWithOperations is a tuple of Operations and Resources. + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +message ParamKind { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + optional string apiVersion = 1; + + // Kind is the API kind the resources belong to. + // Required. + optional string kind = 2; +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +message ParamRef { + // `name` is the name of the resource being referenced. + // + // `name` and `selector` are mutually exclusive properties. If one is set, + // the other must be unset. + // + // +optional + optional string name = 1; + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + optional string namespace = 2; + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. 
+ // + // Allowed values are `Allow` or `Deny` + // Default to `Deny` + // +optional + optional string parameterNotFoundAction = 4; +} + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +message ValidatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; +} + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message ValidatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + optional ValidatingAdmissionPolicyBindingSpec spec = 2; +} + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated ValidatingAdmissionPolicyBinding items = 2; +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingSpec { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. 
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; +} + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. 
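To make the binding spec described above concrete, here is a hedged Go sketch that assembles a ValidatingAdmissionPolicyBindingSpec from these vendored v1alpha1 types; the policy name, label selector, and the use of the Deny and Audit action constants are illustrative assumptions, not values taken from this change.

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A binding that references a hypothetical policy, selects param objects
	// by label, and both denies the request and audits validation failures.
	spec := admissionv1alpha1.ValidatingAdmissionPolicyBindingSpec{
		PolicyName: "replica-limit.example.com",
		ParamRef: &admissionv1alpha1.ParamRef{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"environment": "prod"},
			},
		},
		ValidationActions: []admissionv1alpha1.ValidationAction{
			admissionv1alpha1.Deny,
			admissionv1alpha1.Audit,
		},
	}
	fmt.Printf("%+v\n", spec)
}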
+message ValidatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated ValidatingAdmissionPolicy items = 2; +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +message ValidatingAdmissionPolicySpec { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + optional MatchResources matchConstraints = 2; + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + repeated Validation validations = 3; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated Variable variables = 7; +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; +} + +// Validation specifies the CEL expression which is used to apply the validation. +message Validation { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. 
+ // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + optional string Expression = 1; + + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + optional string message = 2; + + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. 
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; +} + +// Variable is the definition of a variable that is used for composition. +message Variable { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + optional string Name = 1; + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + optional string Expression = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go new file mode 100644 index 000000000..d4c2fbe80 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. +// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. 
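As a rough illustration of how the Validation and Variable messages defined earlier in this file relate, the following hedged Go sketch uses the vendored v1alpha1 types; the variable name and CEL expressions are invented for the example.

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	// A composited variable is referenced from later expressions through the
	// `variables` map, e.g. variables.replicas in the validation below.
	vars := []admissionv1alpha1.Variable{
		{Name: "replicas", Expression: "object.spec.replicas"},
	}
	validations := []admissionv1alpha1.Validation{
		{
			Expression: "variables.replicas <= 5",
			Message:    "spec.replicas must be no more than 5",
		},
	}
	fmt.Println(vars, validations)
}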
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingAdmissionPolicy{}, + &ValidatingAdmissionPolicyList{}, + &ValidatingAdmissionPolicyBinding{}, + &ValidatingAdmissionPolicyBindingList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go new file mode 100644 index 000000000..78d918bc7 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -0,0 +1,665 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule = v1.Rule + +// ScopeType specifies a scope for a Rule. +// +enum +type ScopeType = v1.ScopeType + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = v1.ClusterScope + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = v1.NamespacedScope + // AllScopes means that all scopes are included. + AllScopes ScopeType = v1.AllScopes +) + +// ParameterNotFoundActionType specifies a failure policy that defines how a binding +// is evaluated when the param referred by its perNamespaceParamRef is not found. +// +enum +type ParameterNotFoundActionType string + +const ( + // Ignore means that an error finding params for a binding is ignored + AllowAction ParameterNotFoundActionType = "Allow" + // Fail means that an error finding params for a binding is ignored + DenyAction ParameterNotFoundActionType = "Deny" +) + +// FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled. +// +enum +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// MatchPolicyType specifies the type of match policy. +// +enum +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule. + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. 
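The register.go shown above follows the standard SchemeBuilder/AddToScheme pattern; a minimal, hedged sketch of how a consumer could register these types into a runtime.Scheme follows (the lookup at the end is only there to show the registration took effect).

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers ValidatingAdmissionPolicy, ValidatingAdmissionPolicyBinding,
	// and both list types under admissionregistration.k8s.io/v1alpha1.
	if err := admissionv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvks, _, err := scheme.ObjectKinds(&admissionv1alpha1.ValidatingAdmissionPolicy{})
	fmt.Println(gvks, err)
}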
+type ValidatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +type ValidatingAdmissionPolicySpec struct { + // ParamKind specifies the kind of resources used to parameterize this policy. 
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` +} + +type MatchCondition v1.MatchCondition + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +type ParamKind struct { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"` + + // Kind is the API kind the resources belong to. + // Required. + Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"` +} + +// Validation specifies the CEL expression which is used to apply the validation. +type Validation struct { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. 
+ // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"` + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. 
If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// Variable is the definition of a variable that is used for composition. +type Variable struct { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + Name string `json:"name" protobuf:"bytes,1,opt,name=Name"` + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. 
+// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type ValidatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingSpec struct { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // MatchResources declares what resources match this binding and will be validated by it. + // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. 
+ // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +type ParamRef struct { + // `name` is the name of the resource being referenced. + // + // `name` and `selector` are mutually exclusive properties. If one is set, + // the other must be unset. + // + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. 
Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"` + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"` + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // Default to `Deny` + // +optional + ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"` +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +type MatchResources struct { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. 
objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"` + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"` + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"` + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` +} + +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. 
+// +structType=atomic +type NamedRuleWithOperations struct { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"` + // RuleWithOperations is a tuple of Operations and Resources. + RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"` +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations = v1.RuleWithOperations + +// OperationType specifies an operation for a request. +// +enum +type OperationType = v1.OperationType + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. +const ( + OperationAll OperationType = v1.OperationAll + Create OperationType = v1.Create + Update OperationType = v1.Update + Delete OperationType = v1.Delete + Connect OperationType = v1.Connect +) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..dcf46b324 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,204 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. 
In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + +var map_MatchResources = map[string]string{ + "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. 
A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", +} + +func (MatchResources) SwaggerDoc() map[string]string { + return map_MatchResources +} + +var map_NamedRuleWithOperations = map[string]string{ + "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", +} + +func (NamedRuleWithOperations) SwaggerDoc() map[string]string { + return map_NamedRuleWithOperations +} + +var map_ParamKind = map[string]string{ + "": "ParamKind is a tuple of Group Kind and Version.", + "apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "kind": "Kind is the API kind the resources belong to. Required.", +} + +func (ParamKind) SwaggerDoc() map[string]string { + return map_ParamKind +} + +var map_ParamRef = map[string]string{ + "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "name": "`name` is the name of the resource being referenced.\n\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. 
Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny` Default to `Deny`", +} + +func (ParamRef) SwaggerDoc() map[string]string { + return map_ParamRef +} + +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + +var map_ValidatingAdmissionPolicy = map[string]string{ + "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", +} + +func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicy +} + +var map_ValidatingAdmissionPolicyBinding = map[string]string{ + "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. 
Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", +} + +func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBinding +} + +var map_ValidatingAdmissionPolicyBindingList = map[string]string{ + "": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingList +} + +var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. 
Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", +} + +func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingSpec +} + +var map_ValidatingAdmissionPolicyList = map[string]string{ + "": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyList +} + +var map_ValidatingAdmissionPolicySpec = map[string]string{ + "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. 
A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", +} + +func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicySpec +} + +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + +var map_Validation = map[string]string{ + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. 
- 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. 
If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", +} + +func (Validation) SwaggerDoc() map[string]string { + return map_Validation +} + +var map_Variable = map[string]string{ + "": "Variable is the definition of a variable that is used for composition.", + "name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", +} + +func (Variable) SwaggerDoc() map[string]string { + return map_Variable +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..24cd0e4e9 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,475 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. +func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchResources) DeepCopyInto(out *MatchResources) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludeResourceRules != nil { + in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources. +func (in *MatchResources) DeepCopy() *MatchResources { + if in == nil { + return nil + } + out := new(MatchResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { + *out = *in + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations. +func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations { + if in == nil { + return nil + } + out := new(NamedRuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamKind) DeepCopyInto(out *ParamKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind. +func (in *ParamKind) DeepCopy() *ParamKind { + if in == nil { + return nil + } + out := new(ParamKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParamRef) DeepCopyInto(out *ParamRef) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ParameterNotFoundAction != nil { + in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction + *out = new(ParameterNotFoundActionType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef. +func (in *ParamRef) DeepCopy() *ParamRef { + if in == nil { + return nil + } + out := new(ParamRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. +func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy. +func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding. +func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList. +func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Validations != nil { + in, out := &in.Validations, &out.Validations + *out = make([]Validation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec. +func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Validation) DeepCopyInto(out *Validation) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(v1.StatusReason) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation. +func (in *Validation) DeepCopy() *Validation { + if in == nil { + return nil + } + out := new(Validation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. 
+func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 9f0988ca7..261ae41bd 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +// source: k8s.io/api/admissionregistration/v1beta1/generated.proto package v1beta1 @@ -25,6 +25,8 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/admissionregistration/v1" + k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -44,10 +46,122 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } +func (*AuditAnnotation) ProtoMessage() {} +func (*AuditAnnotation) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{0} +} +func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditAnnotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditAnnotation.Merge(m, src) +} +func (m *AuditAnnotation) XXX_Size() int { + return m.Size() +} +func (m *AuditAnnotation) XXX_DiscardUnknown() { + xxx_messageInfo_AuditAnnotation.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo + +func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } +func (*ExpressionWarning) ProtoMessage() {} +func (*ExpressionWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{1} +} +func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExpressionWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExpressionWarning.Merge(m, src) +} +func (m *ExpressionWarning) XXX_Size() int { + return m.Size() +} +func (m *ExpressionWarning) XXX_DiscardUnknown() { + xxx_messageInfo_ExpressionWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo + +func (m *MatchCondition) Reset() { *m = MatchCondition{} } +func (*MatchCondition) ProtoMessage() {} +func (*MatchCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{2} +} +func (m *MatchCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchCondition.Merge(m, src) +} +func (m *MatchCondition) XXX_Size() int { + return m.Size() +} +func (m 
*MatchCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MatchCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchCondition proto.InternalMessageInfo + +func (m *MatchResources) Reset() { *m = MatchResources{} } +func (*MatchResources) ProtoMessage() {} +func (*MatchResources) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{3} +} +func (m *MatchResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MatchResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_MatchResources.Merge(m, src) +} +func (m *MatchResources) XXX_Size() int { + return m.Size() +} +func (m *MatchResources) XXX_DiscardUnknown() { + xxx_messageInfo_MatchResources.DiscardUnknown(m) +} + +var xxx_messageInfo_MatchResources proto.InternalMessageInfo + func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } func (*MutatingWebhook) ProtoMessage() {} func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{0} + return fileDescriptor_7f7c65a4f012fb19, []int{4} } func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +189,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{1} + return fileDescriptor_7f7c65a4f012fb19, []int{5} } func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +217,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{2} + return fileDescriptor_7f7c65a4f012fb19, []int{6} } func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -128,15 +242,15 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{3} +func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } +func (*NamedRuleWithOperations) ProtoMessage() {} +func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{7} } -func (m *Rule) XXX_Unmarshal(b []byte) error { +func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -144,27 +258,27 @@ func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { } return b[:n], nil } -func (m *Rule) XXX_Merge(src proto.Message) { - xxx_messageInfo_Rule.Merge(m, src) +func (m *NamedRuleWithOperations) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRuleWithOperations.Merge(m, src) } -func (m *Rule) XXX_Size() int { +func (m *NamedRuleWithOperations) XXX_Size() int { return m.Size() } -func (m *Rule) XXX_DiscardUnknown() { - xxx_messageInfo_Rule.DiscardUnknown(m) +func (m *NamedRuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m) } -var xxx_messageInfo_Rule proto.InternalMessageInfo +var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{4} +func (m *ParamKind) Reset() { *m = ParamKind{} } +func (*ParamKind) ProtoMessage() {} +func (*ParamKind) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{8} } -func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { +func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -172,22 +286,50 @@ func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, } return b[:n], nil } -func (m *RuleWithOperations) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuleWithOperations.Merge(m, src) +func (m *ParamKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamKind.Merge(m, src) } -func (m *RuleWithOperations) XXX_Size() int { +func (m *ParamKind) XXX_Size() int { return m.Size() } -func (m *RuleWithOperations) XXX_DiscardUnknown() { - xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +func (m *ParamKind) XXX_DiscardUnknown() { + xxx_messageInfo_ParamKind.DiscardUnknown(m) } -var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo +var xxx_messageInfo_ParamKind proto.InternalMessageInfo + +func (m *ParamRef) Reset() { *m = ParamRef{} } +func (*ParamRef) ProtoMessage() {} +func (*ParamRef) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{9} +} +func (m *ParamRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParamRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamRef.Merge(m, src) +} +func (m *ParamRef) XXX_Size() int { + return m.Size() +} +func (m *ParamRef) XXX_DiscardUnknown() { + xxx_messageInfo_ParamRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamRef proto.InternalMessageInfo func (m *ServiceReference) Reset() { *m = ServiceReference{} } func (*ServiceReference) ProtoMessage() {} func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{5} + return fileDescriptor_7f7c65a4f012fb19, []int{10} } func (m *ServiceReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -212,10 +354,234 @@ func (m *ServiceReference) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceReference proto.InternalMessageInfo +func (m *TypeChecking) Reset() { *m = TypeChecking{} } +func (*TypeChecking) ProtoMessage() {} +func (*TypeChecking) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{11} +} +func (m *TypeChecking) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeChecking) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeChecking.Merge(m, src) +} +func (m *TypeChecking) XXX_Size() int { + return m.Size() +} +func (m *TypeChecking) XXX_DiscardUnknown() { + xxx_messageInfo_TypeChecking.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeChecking proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } +func (*ValidatingAdmissionPolicy) ProtoMessage() {} +func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{12} +} +func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src) +} +func (m *ValidatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } +func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{13} +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } +func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{14} +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingList) 
XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } +func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{15} +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } +func (*ValidatingAdmissionPolicyList) ProtoMessage() {} +func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{16} +} +func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } +func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} +func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{17} +} +func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src) +} +func (m *ValidatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } +func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} +func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_7f7c65a4f012fb19, []int{18} +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src) +} +func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int { + return m.Size() +} +func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo + func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } func (*ValidatingWebhook) ProtoMessage() {} func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{6} + return fileDescriptor_7f7c65a4f012fb19, []int{19} } func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +609,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{7} + return fileDescriptor_7f7c65a4f012fb19, []int{20} } func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +637,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{8} + return fileDescriptor_7f7c65a4f012fb19, []int{21} } func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -296,10 +662,66 @@ func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo +func (m *Validation) Reset() { *m = Validation{} } +func (*Validation) ProtoMessage() {} +func (*Validation) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{22} +} +func (m *Validation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Validation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validation.Merge(m, src) +} +func (m *Validation) XXX_Size() int { + return m.Size() +} +func (m *Validation) XXX_DiscardUnknown() { + xxx_messageInfo_Validation.DiscardUnknown(m) +} + +var xxx_messageInfo_Validation proto.InternalMessageInfo + +func (m *Variable) Reset() { *m = Variable{} } +func (*Variable) ProtoMessage() {} +func (*Variable) Descriptor() ([]byte, []int) { + return fileDescriptor_7f7c65a4f012fb19, []int{23} +} +func (m *Variable) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, 
err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Variable) XXX_Merge(src proto.Message) { + xxx_messageInfo_Variable.Merge(m, src) +} +func (m *Variable) XXX_Size() int { + return m.Size() +} +func (m *Variable) XXX_DiscardUnknown() { + xxx_messageInfo_Variable.DiscardUnknown(m) +} + +var xxx_messageInfo_Variable proto.InternalMessageInfo + func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } func (*WebhookClientConfig) ProtoMessage() {} func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{9} + return fileDescriptor_7f7c65a4f012fb19, []int{24} } func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -325,97 +747,165 @@ func (m *WebhookClientConfig) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo func init() { + proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation") + proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning") + proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition") + proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources") proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") - proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") - proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") + proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations") + proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind") + proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1beta1.TypeChecking") + proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy") + proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding") + proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingList") + proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec") + proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyList") + proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec") + proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus") proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") 
proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1beta1.Validation") + proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1beta1.Variable") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) -} - -var fileDescriptor_abeea74cbc46f55a = []byte{ - // 1112 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x23, 0x45, - 0x13, 0xce, 0xc4, 0xf6, 0xda, 0x6e, 0x27, 0x9b, 0x4d, 0xbf, 0x2f, 0xbb, 0x26, 0xac, 0x3c, 0x96, - 0x0f, 0xc8, 0x12, 0xec, 0xcc, 0x26, 0x20, 0x04, 0x0b, 0x08, 0xc5, 0x81, 0x85, 0x48, 0xc9, 0x6e, - 0xe8, 0xec, 0x87, 0xc4, 0x87, 0xb4, 0xed, 0x71, 0xd9, 0x6e, 0x6c, 0x4f, 0x8f, 0xa6, 0x7b, 0xbc, - 0xe4, 0xc6, 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, - 0x32, 0x9c, 0x38, 0x70, 0xe0, 0x9a, 0x13, 0x9a, 0x9e, 0xf6, 0xf8, 0x2b, 0x59, 0x4c, 0x90, 0xf6, - 0x94, 0xdb, 0xf4, 0x53, 0x5d, 0x4f, 0x75, 0xd5, 0x54, 0xd5, 0x83, 0x3e, 0xef, 0xbd, 0x2b, 0x2c, - 0xc6, 0xed, 0x5e, 0xd0, 0x04, 0xdf, 0x05, 0x09, 0xc2, 0x1e, 0x82, 0xdb, 0xe2, 0xbe, 0xad, 0x0d, - 0xd4, 0x63, 0x36, 0x6d, 0x0d, 0x98, 0x10, 0x8c, 0xbb, 0x3e, 0x74, 0x98, 0x90, 0x3e, 0x95, 0x8c, - 0xbb, 0xf6, 0x70, 0xb3, 0x09, 0x92, 0x6e, 0xda, 0x1d, 0x70, 0xc1, 0xa7, 0x12, 0x5a, 0x96, 0xe7, - 0x73, 0xc9, 0x71, 0x3d, 0xf1, 0xb4, 0xa8, 0xc7, 0xac, 0x33, 0x3d, 0x2d, 0xed, 0xb9, 0x71, 0xab, - 0xc3, 0x64, 0x37, 0x68, 0x5a, 0x0e, 0x1f, 0xd8, 0x1d, 0xde, 0xe1, 0xb6, 0x22, 0x68, 0x06, 0x6d, - 0x75, 0x52, 0x07, 0xf5, 0x95, 0x10, 0x6f, 0xbc, 0x3d, 0x7e, 0xd2, 0x80, 0x3a, 0x5d, 0xe6, 0x82, - 0x7f, 0x64, 0x7b, 0xbd, 0x4e, 0x0c, 0x08, 0x7b, 0x00, 0x92, 0xda, 0xc3, 0xb9, 0xe7, 0x6c, 0xd8, - 0xe7, 0x79, 0xf9, 0x81, 0x2b, 0xd9, 0x00, 0xe6, 0x1c, 0xde, 0xf9, 0x27, 0x07, 0xe1, 0x74, 0x61, - 0x40, 0x67, 0xfd, 0x6a, 0xbf, 0xe4, 0xd1, 0xda, 0x7e, 0x20, 0xa9, 0x64, 0x6e, 0xe7, 0x31, 0x34, - 0xbb, 0x9c, 0xf7, 0x70, 0x15, 0x65, 0x5d, 0x3a, 0x80, 0xb2, 0x51, 0x35, 0xea, 0xc5, 0xc6, 0xca, - 0x71, 0x68, 0x2e, 0x45, 0xa1, 0x99, 0xbd, 0x47, 0x07, 0x40, 0x94, 0x05, 0x3f, 0x45, 0x2b, 0x4e, - 0x9f, 0x81, 0x2b, 0x77, 0xb8, 0xdb, 0x66, 0x9d, 0xf2, 0x72, 0xd5, 0xa8, 0x97, 0xb6, 0x3e, 0xb4, - 0x16, 0x2d, 0xa2, 0xa5, 0x43, 0xed, 0x4c, 0x90, 0x34, 0xfe, 0xaf, 0x03, 0xad, 0x4c, 0xa2, 0x64, - 0x2a, 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x7d, 0x10, 0xe5, 0x4c, 0x35, 0x53, 0x2f, 0x6d, 0x7d, 0xb0, - 0x78, 0x44, 0x12, 0xf4, 0xe1, 0x31, 0x93, 0xdd, 0xfb, 0x1e, 0x24, 0x16, 0xd1, 0x58, 0xd5, 0x01, - 0x73, 0xb1, 0x4d, 0x90, 0x84, 0x19, 0xef, 0xa1, 0xd5, 0x36, 0x65, 0xfd, 0xc0, 0x87, 0x03, 0xde, - 0x67, 0xce, 0x51, 0x39, 0xab, 0xca, 0xf0, 0x7a, 0x14, 0x9a, 0xab, 0x77, 0x27, 0x0d, 0xa7, 0xa1, - 0xb9, 0x3e, 0x05, 0x3c, 0x38, 0xf2, 0x80, 0x4c, 0x3b, 0xe3, 0x8f, 0x51, 0x69, 0x40, 0xa5, 0xd3, - 0xd5, 0x5c, 0x45, 0xc5, 0x55, 0x8b, 0x42, 0xb3, 0xb4, 0x3f, 0x86, 0x4f, 0x43, 0x73, 0x6d, 0xe2, - 0xa8, 0x78, 0x26, 0xdd, 0xf0, 0xb7, 0x68, 0x3d, 0xae, 0xbb, 0xf0, 0xa8, 0x03, 0x87, 0xd0, 0x07, - 0x47, 0x72, 0xbf, 0x9c, 0x53, 0x45, 0x7f, 0x6b, 0xa2, 0x04, 0xe9, 0x9f, 
0xb7, 0xbc, 0x5e, 0x27, - 0x06, 0x84, 0x15, 0x37, 0x98, 0x35, 0xdc, 0xb4, 0xf6, 0x68, 0x13, 0xfa, 0x23, 0xd7, 0xc6, 0x2b, - 0x51, 0x68, 0xae, 0xdf, 0x9b, 0x65, 0x24, 0xf3, 0x41, 0x30, 0x47, 0x57, 0x79, 0xf3, 0x1b, 0x70, - 0x64, 0x1a, 0xb6, 0x74, 0xf1, 0xb0, 0x38, 0x0a, 0xcd, 0xab, 0xf7, 0xa7, 0xe8, 0xc8, 0x0c, 0x7d, - 0x5c, 0x30, 0xc1, 0x5a, 0xf0, 0x49, 0xbb, 0x0d, 0x8e, 0x14, 0xe5, 0x2b, 0xe3, 0x82, 0x1d, 0x8e, - 0xe1, 0xb8, 0x60, 0xe3, 0xe3, 0x4e, 0x9f, 0x0a, 0x41, 0x26, 0xdd, 0xf0, 0x1d, 0x74, 0x35, 0xee, - 0x7a, 0x1e, 0xc8, 0x43, 0x70, 0xb8, 0xdb, 0x12, 0xe5, 0x7c, 0xd5, 0xa8, 0xe7, 0x92, 0x17, 0x3c, - 0x98, 0xb2, 0x90, 0x99, 0x9b, 0xf8, 0x21, 0xba, 0x91, 0xb6, 0x12, 0x81, 0x21, 0x83, 0xa7, 0x8f, - 0xc0, 0x8f, 0x0f, 0xa2, 0x5c, 0xa8, 0x66, 0xea, 0xc5, 0xc6, 0x6b, 0x51, 0x68, 0xde, 0xd8, 0x3e, - 0xfb, 0x0a, 0x39, 0xcf, 0x17, 0x3f, 0x41, 0xd8, 0x07, 0xe6, 0x0e, 0xb9, 0xa3, 0xda, 0x4f, 0x37, - 0x04, 0x52, 0xf9, 0xdd, 0x8e, 0x42, 0x13, 0x93, 0x39, 0xeb, 0x69, 0x68, 0x5e, 0x9f, 0x47, 0x55, - 0x7b, 0x9c, 0xc1, 0x55, 0xfb, 0xd5, 0x40, 0x37, 0x67, 0x66, 0x39, 0x19, 0x9b, 0x20, 0xe9, 0x78, - 0xfc, 0x04, 0x15, 0xe2, 0x1f, 0xd3, 0xa2, 0x92, 0xaa, 0xe1, 0x2e, 0x6d, 0xdd, 0x5e, 0xec, 0x37, - 0x26, 0xff, 0x6c, 0x1f, 0x24, 0x6d, 0x60, 0x3d, 0x34, 0x68, 0x8c, 0x91, 0x94, 0x15, 0x7f, 0x89, - 0x0a, 0x3a, 0xb2, 0x28, 0x2f, 0xab, 0x11, 0x7d, 0x6f, 0xf1, 0x11, 0x9d, 0x79, 0x7b, 0x23, 0x1b, - 0x87, 0x22, 0x29, 0x61, 0xed, 0x4f, 0x03, 0x55, 0x5f, 0x94, 0xdf, 0x1e, 0x13, 0x12, 0x7f, 0x35, - 0x97, 0xa3, 0xb5, 0x60, 0xab, 0x32, 0x91, 0x64, 0x78, 0x4d, 0x67, 0x58, 0x18, 0x21, 0x13, 0xf9, - 0xf5, 0x50, 0x8e, 0x49, 0x18, 0x8c, 0x92, 0xbb, 0x7b, 0xe1, 0xe4, 0xa6, 0x1e, 0x3e, 0xde, 0x44, - 0xbb, 0x31, 0x39, 0x49, 0x62, 0xd4, 0x7e, 0x36, 0x50, 0x36, 0x5e, 0x4d, 0xf8, 0x0d, 0x54, 0xa4, - 0x1e, 0xfb, 0xd4, 0xe7, 0x81, 0x27, 0xca, 0x86, 0xea, 0xc1, 0xd5, 0x28, 0x34, 0x8b, 0xdb, 0x07, - 0xbb, 0x09, 0x48, 0xc6, 0x76, 0xbc, 0x89, 0x4a, 0xd4, 0x63, 0x69, 0xcb, 0x2e, 0xab, 0xeb, 0x6b, - 0xf1, 0x00, 0x6d, 0x1f, 0xec, 0xa6, 0x6d, 0x3a, 0x79, 0x27, 0xe6, 0xf7, 0x41, 0xf0, 0xc0, 0x77, - 0xf4, 0x66, 0xd5, 0xfc, 0x64, 0x04, 0x92, 0xb1, 0x1d, 0xbf, 0x89, 0x72, 0xc2, 0xe1, 0x1e, 0xe8, - 0xbd, 0x78, 0x3d, 0x7e, 0xf6, 0x61, 0x0c, 0x9c, 0x86, 0x66, 0x51, 0x7d, 0xa8, 0x06, 0x4d, 0x2e, - 0xd5, 0x7e, 0x34, 0x10, 0x9e, 0x5f, 0xbd, 0xf8, 0x23, 0x84, 0x78, 0x7a, 0xd2, 0x29, 0x99, 0xaa, - 0xab, 0x52, 0xf4, 0x34, 0x34, 0x57, 0xd3, 0x93, 0xa2, 0x9c, 0x70, 0xc1, 0x07, 0x28, 0x1b, 0xaf, - 0x6b, 0xad, 0x3c, 0xd6, 0xbf, 0xd3, 0x81, 0xb1, 0xa6, 0xc5, 0x27, 0xa2, 0x98, 0x6a, 0x3f, 0x18, - 0xe8, 0xda, 0x21, 0xf8, 0x43, 0xe6, 0x00, 0x81, 0x36, 0xf8, 0xe0, 0x3a, 0x80, 0x6d, 0x54, 0x4c, - 0x77, 0xa2, 0xd6, 0xc3, 0x75, 0xed, 0x5b, 0x4c, 0xf7, 0x27, 0x19, 0xdf, 0x49, 0xb5, 0x73, 0xf9, - 0x5c, 0xed, 0xbc, 0x89, 0xb2, 0x1e, 0x95, 0xdd, 0x72, 0x46, 0xdd, 0x28, 0xc4, 0xd6, 0x03, 0x2a, - 0xbb, 0x44, 0xa1, 0xca, 0xca, 0x7d, 0xa9, 0x8a, 0x9b, 0xd3, 0x56, 0xee, 0x4b, 0xa2, 0xd0, 0xda, - 0x1f, 0x57, 0xd0, 0xfa, 0x23, 0xda, 0x67, 0xad, 0x4b, 0xbd, 0xbe, 0xd4, 0xeb, 0x05, 0xf5, 0x1a, - 0x5d, 0xea, 0xf5, 0x45, 0xf4, 0xba, 0x76, 0x62, 0xa0, 0xca, 0xdc, 0xac, 0xbd, 0x6c, 0x3d, 0xfd, - 0x7a, 0x4e, 0x4f, 0xdf, 0x5f, 0x7c, 0x84, 0xe6, 0x5e, 0x3f, 0xa7, 0xa8, 0x7f, 0x19, 0xa8, 0xf6, - 0xe2, 0x1c, 0x5f, 0x82, 0xa6, 0x0e, 0xa6, 0x35, 0xf5, 0xb3, 0xff, 0x90, 0xe0, 0x22, 0xaa, 0xfa, - 0x93, 0x81, 0xfe, 0x77, 0xc6, 0x3a, 0xc3, 0xaf, 0xa2, 0x4c, 0xe0, 0xf7, 0xf5, 0x5a, 0xce, 0x47, - 0xa1, 0x99, 0x79, 0x48, 0xf6, 0x48, 0x8c, 0x61, 0x8a, 0xf2, 0x22, 0x51, 0x06, 0x9d, 0xfe, 0x9d, - 
0xc5, 0xdf, 0x38, 0x2b, 0x29, 0x8d, 0x52, 0x14, 0x9a, 0xf9, 0x11, 0x3a, 0xe2, 0xc5, 0x75, 0x54, - 0x70, 0x68, 0x23, 0x70, 0x5b, 0x5a, 0xd3, 0x56, 0x1a, 0x2b, 0x71, 0xb9, 0x76, 0xb6, 0x13, 0x8c, - 0xa4, 0xd6, 0xc6, 0xad, 0xe3, 0x93, 0xca, 0xd2, 0xb3, 0x93, 0xca, 0xd2, 0xf3, 0x93, 0xca, 0xd2, - 0x77, 0x51, 0xc5, 0x38, 0x8e, 0x2a, 0xc6, 0xb3, 0xa8, 0x62, 0x3c, 0x8f, 0x2a, 0xc6, 0x6f, 0x51, - 0xc5, 0xf8, 0xfe, 0xf7, 0xca, 0xd2, 0x17, 0x79, 0x1d, 0xff, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, - 0xcc, 0x27, 0xa9, 0x41, 0x2c, 0x0f, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_7f7c65a4f012fb19) } -func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { +var fileDescriptor_7f7c65a4f012fb19 = []byte{ + // 1957 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7, + 0xd5, 0x2b, 0x52, 0x12, 0xf9, 0xa8, 0x2f, 0x4e, 0x9c, 0x8a, 0x76, 0x1c, 0x52, 0x58, 0x04, 0x85, + 0x0c, 0xb4, 0x64, 0xac, 0x04, 0x89, 0xeb, 0xa0, 0x28, 0x44, 0xc5, 0x76, 0xed, 0x58, 0xb2, 0x30, + 0x4a, 0x24, 0xa0, 0x4d, 0x00, 0x8f, 0x76, 0x87, 0xe4, 0x96, 0xe4, 0xee, 0x76, 0x67, 0x49, 0x5b, + 0x2d, 0xd0, 0x16, 0xe8, 0x21, 0xd7, 0x02, 0xbd, 0x14, 0xe8, 0xa9, 0x7f, 0xa1, 0xf7, 0x02, 0xed, + 0xcd, 0xc7, 0xdc, 0x6a, 0xa0, 0x28, 0x51, 0xb1, 0x87, 0x9e, 0x7a, 0xe8, 0xa1, 0x3d, 0xe8, 0xd2, + 0x62, 0x66, 0x67, 0x3f, 0xb9, 0xb4, 0x56, 0xaa, 0xac, 0x5c, 0x7c, 0xd3, 0xbe, 0xcf, 0x79, 0x6f, + 0xde, 0xd7, 0x3c, 0x0a, 0x6e, 0x77, 0x6f, 0xb3, 0xba, 0x61, 0x35, 0x88, 0x6d, 0x34, 0x88, 0xde, + 0x37, 0x18, 0x33, 0x2c, 0xd3, 0xa1, 0x6d, 0x83, 0xb9, 0x0e, 0x71, 0x0d, 0xcb, 0x6c, 0x0c, 0x6f, + 0x1d, 0x52, 0x97, 0xdc, 0x6a, 0xb4, 0xa9, 0x49, 0x1d, 0xe2, 0x52, 0xbd, 0x6e, 0x3b, 0x96, 0x6b, + 0xa1, 0x75, 0x8f, 0xb3, 0x4e, 0x6c, 0xa3, 0x9e, 0xca, 0x59, 0x97, 0x9c, 0xd7, 0xbf, 0xdd, 0x36, + 0xdc, 0xce, 0xe0, 0xb0, 0xae, 0x59, 0xfd, 0x46, 0xdb, 0x6a, 0x5b, 0x0d, 0x21, 0xe0, 0x70, 0xd0, + 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x5e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd, + 0xfd, 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xa8, 0x61, 0x77, 0xdb, 0x1c, 0xc0, 0x1a, + 0x7d, 0xea, 0x92, 0x34, 0xae, 0xc6, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0x7c, + 0x70, 0x1a, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xe5, 0xcd, 0x81, 0x6e, 0xb8, + 0x9b, 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x36, 0xe4, 0xba, 0xf4, 0xa8, 0xa2, 0xac, 0x29, 0xeb, + 0xc5, 0x66, 0xe9, 0xf9, 0xa8, 0x76, 0x65, 0x3c, 0xaa, 0xe5, 0x3e, 0xa1, 0x47, 0x98, 0xc3, 0xd1, + 0x26, 0x2c, 0x0f, 0x49, 0x6f, 0x40, 0xef, 0x3e, 0xb3, 0x1d, 0x2a, 0x5c, 0x50, 0x99, 0x11, 0xa4, + 0xab, 0x92, 0x74, 0x79, 0x3f, 0x8e, 0xc6, 0x49, 0x7a, 0xb5, 0x07, 0xe5, 0xf0, 0xeb, 0x80, 0x38, + 0xa6, 0x61, 0xb6, 0xd1, 0xb7, 0xa0, 0xd0, 0x32, 0x68, 0x4f, 0xc7, 0xb4, 0x25, 0x05, 0xae, 0x48, + 0x81, 0x85, 0x7b, 0x12, 0x8e, 0x03, 0x0a, 0x74, 0x13, 0xe6, 0x9f, 0x7a, 0x8c, 0x95, 0x9c, 0x20, + 0x5e, 0x96, 0xc4, 0xf3, 0x52, 0x1e, 0xf6, 0xf1, 0x6a, 0x0b, 0x96, 0xb6, 0x89, 0xab, 0x75, 0xb6, + 0x2c, 0x53, 0x37, 0x84, 0x85, 0x6b, 0x90, 0x37, 0x49, 0x9f, 0x4a, 0x13, 0x17, 0x24, 0x67, 0x7e, + 0x87, 0xf4, 0x29, 0x16, 0x18, 0xb4, 0x01, 0x40, 0x93, 0xf6, 0x21, 0x49, 0x07, 0x11, 0xd3, 0x22, + 0x54, 0xea, 0x9f, 0xf3, 0x52, 0x11, 0xa6, 0xcc, 0x1a, 0x38, 0x1a, 0x65, 0xe8, 0x19, 0x94, 0xb9, + 0x38, 0x66, 0x13, 0x8d, 0xee, 0xd1, 0x1e, 0xd5, 0x5c, 0xcb, 0x11, 0x5a, 0x4b, 0x1b, 0xef, 0xd5, + 0xc3, 0x30, 0x0d, 0x6e, 0xac, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x3c, 0x30, 0xea, 0xc3, 
0x5b, + 0xf5, 0x47, 0xe4, 0x90, 0xf6, 0x7c, 0xd6, 0xe6, 0x9b, 0xe3, 0x51, 0xad, 0xbc, 0x93, 0x94, 0x88, + 0x27, 0x95, 0x20, 0x0b, 0x96, 0xac, 0xc3, 0x1f, 0x51, 0xcd, 0x0d, 0xd4, 0xce, 0x9c, 0x5f, 0x2d, + 0x1a, 0x8f, 0x6a, 0x4b, 0x8f, 0x63, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x19, 0x2c, 0x3a, 0xd2, 0x6e, + 0x3c, 0xe8, 0x51, 0x56, 0xc9, 0xad, 0xe5, 0xd6, 0x4b, 0x1b, 0x9b, 0xf5, 0xac, 0xd9, 0x58, 0xe7, + 0x76, 0xe9, 0x9c, 0xf7, 0xc0, 0x70, 0x3b, 0x8f, 0x6d, 0xea, 0xa1, 0x59, 0xf3, 0x4d, 0xe9, 0xf7, + 0x45, 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x15, 0xb8, 0x4a, 0x9f, 0x69, 0xbd, 0x81, 0x4e, + 0x63, 0x74, 0x95, 0xfc, 0x45, 0x9d, 0xe3, 0x86, 0x3c, 0xc7, 0xd5, 0xbb, 0x29, 0x6a, 0x70, 0xaa, + 0x72, 0xf4, 0x31, 0x94, 0xfa, 0x3c, 0x24, 0x76, 0xad, 0x9e, 0xa1, 0x1d, 0x55, 0xe6, 0x45, 0x20, + 0xa9, 0xe3, 0x51, 0xad, 0xb4, 0x1d, 0x82, 0x4f, 0x46, 0xb5, 0xe5, 0xc8, 0xe7, 0xa7, 0x47, 0x36, + 0xc5, 0x51, 0x36, 0xf5, 0x4f, 0x05, 0x58, 0xde, 0x1e, 0xf0, 0xf4, 0x34, 0xdb, 0x07, 0xf4, 0xb0, + 0x63, 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x14, 0x16, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xcb, 0x32, 0x5b, + 0x46, 0x5b, 0x06, 0xc0, 0x77, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x8a, 0x08, 0x69, 0x5e, 0x95, 0x8a, + 0x16, 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x59, 0x27, 0x12, 0x02, 0x1f, 0x66, 0xd1, 0x58, + 0x4f, 0x71, 0xf8, 0xa2, 0xd4, 0x35, 0xeb, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc1, 0x62, 0x8b, 0x18, + 0xbd, 0x81, 0x43, 0xa5, 0x53, 0xf3, 0xc2, 0x03, 0xdf, 0xe4, 0x11, 0x72, 0x2f, 0x8a, 0x38, 0x19, + 0xd5, 0xca, 0x31, 0x80, 0x70, 0x6c, 0x9c, 0x39, 0x79, 0x41, 0xc5, 0x73, 0x5d, 0x50, 0x7a, 0x9e, + 0xcf, 0x7e, 0x3d, 0x79, 0x5e, 0x7a, 0xb5, 0x79, 0xfe, 0x31, 0x94, 0x98, 0xa1, 0xd3, 0xbb, 0xad, + 0x16, 0xd5, 0x5c, 0x56, 0x99, 0x0b, 0x1d, 0xb6, 0x17, 0x82, 0xb9, 0xc3, 0xc2, 0xcf, 0xad, 0x1e, + 0x61, 0x0c, 0x47, 0xd9, 0xd0, 0x1d, 0x58, 0xe2, 0x5d, 0xc9, 0x1a, 0xb8, 0x7b, 0x54, 0xb3, 0x4c, + 0x9d, 0x89, 0xd4, 0x98, 0xf5, 0x4e, 0xf0, 0x69, 0x0c, 0x83, 0x13, 0x94, 0xe8, 0x33, 0x58, 0x0d, + 0xa2, 0x08, 0xd3, 0xa1, 0x41, 0x9f, 0xee, 0x53, 0x87, 0x7f, 0xb0, 0x4a, 0x61, 0x2d, 0xb7, 0x5e, + 0x6c, 0xbe, 0x35, 0x1e, 0xd5, 0x56, 0x37, 0xd3, 0x49, 0xf0, 0x34, 0x5e, 0xf4, 0x04, 0x90, 0x43, + 0x0d, 0x73, 0x68, 0x69, 0x22, 0xfc, 0x64, 0x40, 0x80, 0xb0, 0xef, 0xdd, 0xf1, 0xa8, 0x86, 0xf0, + 0x04, 0xf6, 0x64, 0x54, 0xfb, 0xc6, 0x24, 0x54, 0x84, 0x47, 0x8a, 0x2c, 0xf4, 0x53, 0x58, 0xee, + 0xc7, 0x1a, 0x11, 0xab, 0x2c, 0x88, 0x0c, 0xb9, 0x9d, 0x3d, 0x27, 0xe3, 0x9d, 0x2c, 0xec, 0xb9, + 0x71, 0x38, 0xc3, 0x49, 0x4d, 0xea, 0x5f, 0x15, 0xb8, 0x91, 0xa8, 0x21, 0x5e, 0xba, 0x0e, 0x3c, + 0x0d, 0xe8, 0x09, 0x14, 0x78, 0x54, 0xe8, 0xc4, 0x25, 0xb2, 0x45, 0xbd, 0x9b, 0x2d, 0x86, 0xbc, + 0x80, 0xd9, 0xa6, 0x2e, 0x09, 0x5b, 0x64, 0x08, 0xc3, 0x81, 0x54, 0xf4, 0x43, 0x28, 0x48, 0xcd, + 0xac, 0x32, 0x23, 0x0c, 0xff, 0xce, 0x19, 0x0c, 0x8f, 0x9f, 0xbd, 0x99, 0xe7, 0xaa, 0x70, 0x20, + 0x50, 0xfd, 0xa7, 0x02, 0x6b, 0x2f, 0xb3, 0xef, 0x91, 0xc1, 0x5c, 0xf4, 0xf9, 0x84, 0x8d, 0xf5, + 0x8c, 0x79, 0x62, 0x30, 0xcf, 0xc2, 0x60, 0x26, 0xf1, 0x21, 0x11, 0xfb, 0xba, 0x30, 0x6b, 0xb8, + 0xb4, 0xef, 0x1b, 0x77, 0xef, 0xdc, 0xc6, 0xc5, 0x0e, 0x1e, 0x96, 0xc1, 0x07, 0x5c, 0x38, 0xf6, + 0x74, 0xa8, 0x2f, 0x14, 0x58, 0x9d, 0xd2, 0xa9, 0xd0, 0x87, 0x61, 0x2f, 0x16, 0x45, 0xa4, 0xa2, + 0x88, 0xbc, 0x28, 0x47, 0x9b, 0xa8, 0x40, 0xe0, 0x38, 0x1d, 0xfa, 0xa5, 0x02, 0xc8, 0x99, 0x90, + 0x27, 0x3b, 0xc7, 0xb9, 0xeb, 0xf8, 0x75, 0x69, 0x00, 0x9a, 0xc4, 0xe1, 0x14, 0x75, 0x2a, 0x81, + 0xe2, 0x2e, 0x71, 0x48, 0xff, 0x13, 0xc3, 0xd4, 0xf9, 0x24, 0x46, 0x6c, 0x43, 0x66, 0xa9, 0xec, + 0x76, 0x41, 0x98, 
0x6d, 0xee, 0x3e, 0x90, 0x18, 0x1c, 0xa1, 0xe2, 0xbd, 0xb1, 0x6b, 0x98, 0xba, + 0x9c, 0xdb, 0x82, 0xde, 0xc8, 0xe5, 0x61, 0x81, 0x51, 0x7f, 0x3f, 0x03, 0x05, 0xa1, 0x83, 0xcf, + 0x92, 0xa7, 0xb7, 0xd2, 0x06, 0x14, 0x83, 0xd2, 0x2b, 0xa5, 0x96, 0x25, 0x59, 0x31, 0x28, 0xd3, + 0x38, 0xa4, 0x41, 0x5f, 0x40, 0x81, 0xf9, 0x05, 0x39, 0x77, 0xfe, 0x82, 0xbc, 0xc0, 0x23, 0x2d, + 0x28, 0xc5, 0x81, 0x48, 0xe4, 0xc2, 0xaa, 0xcd, 0x4f, 0x4f, 0x5d, 0xea, 0xec, 0x58, 0xee, 0x3d, + 0x6b, 0x60, 0xea, 0x9b, 0x1a, 0xf7, 0x9e, 0xec, 0x86, 0x77, 0x78, 0x09, 0xdc, 0x4d, 0x27, 0x39, + 0x19, 0xd5, 0xde, 0x9a, 0x82, 0x12, 0xa5, 0x6b, 0x9a, 0x68, 0xf5, 0x77, 0x0a, 0xac, 0xec, 0x51, + 0x67, 0x68, 0x68, 0x14, 0xd3, 0x16, 0x75, 0xa8, 0xa9, 0x25, 0x5c, 0xa3, 0x64, 0x70, 0x8d, 0xef, + 0xed, 0x99, 0xa9, 0xde, 0xbe, 0x01, 0x79, 0x9b, 0xb8, 0x1d, 0x39, 0xd8, 0x17, 0x38, 0x76, 0x97, + 0xb8, 0x1d, 0x2c, 0xa0, 0x02, 0x6b, 0x39, 0xae, 0x30, 0x74, 0x56, 0x62, 0x2d, 0xc7, 0xc5, 0x02, + 0xaa, 0xfe, 0x46, 0x81, 0x05, 0x6e, 0xc5, 0x56, 0x87, 0x6a, 0x5d, 0xfe, 0xac, 0xf8, 0x52, 0x01, + 0x44, 0x93, 0x8f, 0x0d, 0x2f, 0x23, 0x4a, 0x1b, 0x1f, 0x65, 0x4f, 0xd1, 0x89, 0x07, 0x4b, 0x18, + 0xd6, 0x13, 0x28, 0x86, 0x53, 0x54, 0xaa, 0x7f, 0x99, 0x81, 0x6b, 0xfb, 0xa4, 0x67, 0xe8, 0x22, + 0xd5, 0x83, 0xfe, 0x24, 0x9b, 0xc3, 0xab, 0x2f, 0xbf, 0x06, 0xe4, 0x99, 0x4d, 0x35, 0x99, 0xcd, + 0xf7, 0xb3, 0x9b, 0x3e, 0xf5, 0xd0, 0x7b, 0x36, 0xd5, 0xc2, 0x1b, 0xe4, 0x5f, 0x58, 0xa8, 0x40, + 0x3f, 0x86, 0x39, 0xe6, 0x12, 0x77, 0xc0, 0x64, 0xf0, 0x3f, 0xb8, 0x08, 0x65, 0x42, 0x60, 0x73, + 0x49, 0xaa, 0x9b, 0xf3, 0xbe, 0xb1, 0x54, 0xa4, 0xfe, 0x47, 0x81, 0xb5, 0xa9, 0xbc, 0x4d, 0xc3, + 0xd4, 0x79, 0x30, 0xbc, 0x7a, 0x27, 0xdb, 0x31, 0x27, 0xef, 0x5c, 0x80, 0xdd, 0xf2, 0xec, 0xd3, + 0x7c, 0xad, 0xfe, 0x5b, 0x81, 0x77, 0x4e, 0x63, 0xbe, 0x84, 0xe6, 0x67, 0xc5, 0x9b, 0xdf, 0xc3, + 0x8b, 0xb3, 0x7c, 0x4a, 0x03, 0xfc, 0x32, 0x77, 0xba, 0xdd, 0xdc, 0x4d, 0xbc, 0x83, 0xd8, 0x02, + 0xb8, 0x13, 0x16, 0xf9, 0xe0, 0x12, 0x77, 0x03, 0x0c, 0x8e, 0x50, 0x71, 0x5f, 0xd9, 0xb2, 0x3d, + 0xc8, 0xab, 0xdc, 0xc8, 0x6e, 0x90, 0xdf, 0x58, 0xbc, 0xf2, 0xed, 0x7f, 0xe1, 0x40, 0x22, 0x72, + 0x61, 0xa9, 0x1f, 0x5b, 0x14, 0xc8, 0x34, 0x39, 0xeb, 0x1c, 0x18, 0xf0, 0x7b, 0x73, 0x73, 0x1c, + 0x86, 0x13, 0x3a, 0xd0, 0x01, 0x94, 0x87, 0xd2, 0x5f, 0x96, 0xe9, 0x95, 0x74, 0xef, 0x75, 0x5c, + 0x6c, 0xde, 0xe4, 0xef, 0x8d, 0xfd, 0x24, 0xf2, 0x64, 0x54, 0x5b, 0x49, 0x02, 0xf1, 0xa4, 0x0c, + 0xf5, 0x1f, 0x0a, 0xbc, 0x3d, 0xf5, 0x26, 0x2e, 0x21, 0xf4, 0x3a, 0xf1, 0xd0, 0xdb, 0xba, 0x88, + 0xd0, 0x4b, 0x8f, 0xb9, 0xdf, 0xce, 0xbd, 0xc4, 0x52, 0x11, 0x6c, 0x4f, 0xa0, 0x68, 0xfb, 0xb3, + 0x4b, 0xca, 0xa6, 0x27, 0x4b, 0xe4, 0x70, 0xd6, 0xe6, 0x22, 0xef, 0x9f, 0xc1, 0x27, 0x0e, 0x85, + 0xa2, 0x9f, 0xc0, 0x8a, 0x3f, 0xdb, 0x73, 0x7e, 0xc3, 0x74, 0xfd, 0x01, 0xed, 0xfc, 0xe1, 0x73, + 0x75, 0x3c, 0xaa, 0xad, 0x6c, 0x27, 0xa4, 0xe2, 0x09, 0x3d, 0xa8, 0x0b, 0xa5, 0xf0, 0xfa, 0xfd, + 0xf7, 0xfd, 0xfb, 0x67, 0xf7, 0xb7, 0x65, 0x36, 0xdf, 0x90, 0x0e, 0x2e, 0x85, 0x30, 0x86, 0xa3, + 0xd2, 0x2f, 0xf8, 0xa1, 0xff, 0x73, 0x58, 0x21, 0xf1, 0x45, 0x27, 0xab, 0xcc, 0x9e, 0xf5, 0x11, + 0x92, 0x58, 0x95, 0x36, 0x2b, 0xd2, 0x88, 0x95, 0x04, 0x82, 0xe1, 0x09, 0x65, 0x69, 0xaf, 0xbf, + 0xb9, 0xcb, 0x7a, 0xfd, 0x21, 0x0d, 0x8a, 0x43, 0xe2, 0x18, 0xe4, 0xb0, 0x47, 0xf9, 0x53, 0x3b, + 0x77, 0xb6, 0x82, 0xb6, 0x2f, 0x59, 0xc3, 0xc9, 0xce, 0x87, 0x30, 0x1c, 0xca, 0x55, 0xff, 0x38, + 0x03, 0xb5, 0x53, 0xda, 0x37, 0x7a, 0x08, 0xc8, 0x3a, 0x64, 0xd4, 0x19, 0x52, 0xfd, 0xbe, 0xb7, + 0x8a, 0xf6, 0xc7, 0xfa, 0x5c, 0x38, 0x50, 
0x3d, 0x9e, 0xa0, 0xc0, 0x29, 0x5c, 0xa8, 0x07, 0x0b, + 0x6e, 0x64, 0xd4, 0x93, 0x59, 0xf0, 0x41, 0x76, 0xbb, 0xa2, 0x83, 0x62, 0x73, 0x65, 0x3c, 0xaa, + 0xc5, 0x46, 0x47, 0x1c, 0x93, 0x8e, 0x34, 0x00, 0x2d, 0xbc, 0x3a, 0x2f, 0xf4, 0x1b, 0xd9, 0xaa, + 0x58, 0x78, 0x63, 0x41, 0xdf, 0x89, 0x5c, 0x56, 0x44, 0xac, 0x7a, 0x3c, 0x0f, 0xe5, 0xd0, 0x85, + 0xaf, 0x77, 0x7d, 0xaf, 0x77, 0x7d, 0x2f, 0xdd, 0xf5, 0xc1, 0xeb, 0x5d, 0xdf, 0xb9, 0x76, 0x7d, + 0x29, 0xb5, 0xb8, 0x74, 0x69, 0x9b, 0xb8, 0x63, 0x05, 0xaa, 0x13, 0x39, 0x7e, 0xd9, 0xbb, 0xb8, + 0x2f, 0x26, 0x76, 0x71, 0x1f, 0x9d, 0x67, 0x6c, 0x9a, 0xb6, 0x8d, 0xfb, 0x97, 0x02, 0xea, 0xcb, + 0x6d, 0xbc, 0x84, 0xb9, 0xb0, 0x1f, 0x9f, 0x0b, 0xbf, 0xff, 0x7f, 0x18, 0x98, 0x65, 0x23, 0xf7, + 0x5f, 0x05, 0x20, 0x1c, 0x66, 0xd0, 0x3b, 0x10, 0xf9, 0xa1, 0x50, 0x96, 0x6e, 0xcf, 0x4d, 0x11, + 0x38, 0xba, 0x09, 0xf3, 0x7d, 0xca, 0x18, 0x69, 0xfb, 0x0b, 0x91, 0xe0, 0x77, 0xcc, 0x6d, 0x0f, + 0x8c, 0x7d, 0x3c, 0x3a, 0x80, 0x39, 0x87, 0x12, 0x66, 0x99, 0x72, 0x31, 0xf2, 0x3d, 0xfe, 0x0a, + 0xc6, 0x02, 0x72, 0x32, 0xaa, 0xdd, 0xca, 0xf2, 0x3b, 0x73, 0x5d, 0x3e, 0x9a, 0x05, 0x13, 0x96, + 0xe2, 0xd0, 0x7d, 0x28, 0x4b, 0x1d, 0x91, 0x03, 0x7b, 0x95, 0xf6, 0x9a, 0x3c, 0x4d, 0x79, 0x3b, + 0x49, 0x80, 0x27, 0x79, 0xd4, 0x87, 0x50, 0xf0, 0x07, 0x03, 0x54, 0x81, 0x7c, 0xe4, 0xbd, 0xe5, + 0x19, 0x2e, 0x20, 0x09, 0xc7, 0xcc, 0xa4, 0x3b, 0x46, 0xfd, 0x83, 0x02, 0x6f, 0xa4, 0x34, 0x25, + 0x74, 0x0d, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0x30, 0x3f, 0x1e, 0xd5, 0x72, 0x9f, 0xe1, 0x47, 0x98, + 0xc3, 0x10, 0x81, 0x79, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xc9, 0x7e, 0xe3, 0xc9, 0xbd, 0x56, + 0xb3, 0xc4, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x3a, 0x14, 0x34, 0xd2, 0x1c, 0x98, 0x7a, 0xcf, + 0xbb, 0xaf, 0x05, 0xef, 0x8d, 0xb7, 0xb5, 0xe9, 0xc1, 0x70, 0x80, 0x6d, 0xee, 0x3c, 0x3f, 0xae, + 0x5e, 0xf9, 0xea, 0xb8, 0x7a, 0xe5, 0xc5, 0x71, 0xf5, 0xca, 0x2f, 0xc6, 0x55, 0xe5, 0xf9, 0xb8, + 0xaa, 0x7c, 0x35, 0xae, 0x2a, 0x2f, 0xc6, 0x55, 0xe5, 0x6f, 0xe3, 0xaa, 0xf2, 0xab, 0xbf, 0x57, + 0xaf, 0xfc, 0x60, 0x3d, 0xeb, 0x7f, 0x39, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf2, 0xe8, + 0x4a, 0x10, 0x21, 0x00, 0x00, +} + +func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -425,27 +915,222 @@ func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a + i -= len(m.ValueExpression) + copy(dAtA[i:], m.ValueExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Warning) + copy(dAtA[i:], m.Warning) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warning))) + i-- + dAtA[i] = 0x1a + i -= len(m.FieldRef) + copy(dAtA[i:], m.FieldRef) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef))) + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func (m *MatchCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MatchResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MatchResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x3a + } + if len(m.ExcludeResourceRules) > 0 { + for iNdEx := len(m.ExcludeResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExcludeResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } if m.ReinvocationPolicy != nil { i -= len(*m.ReinvocationPolicy) @@ -627,7 +1312,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *Rule) Marshal() (dAtA []byte, err error) { +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -637,46 +1322,31 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Rule) MarshalTo(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Scope != nil { - i -= len(*m.Scope) - copy(dAtA[i:], *m.Scope) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i-- - dAtA[i] = 0x22 - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.APIVersions) > 0 { - for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIVersions[iNdEx]) - copy(dAtA[i:], m.APIVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) - i-- - dAtA[i] = 0x12 + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) i-- dAtA[i] = 0xa } @@ -684,7 +1354,7 @@ func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { +func (m *ParamKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -694,35 +1364,78 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
+func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) i-- dAtA[i] = 0x12 - if len(m.Operations) > 0 { - for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Operations[iNdEx]) - copy(dAtA[i:], m.Operations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) - i-- - dAtA[i] = 0xa + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ParamRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i-- + dAtA[i] = 0x22 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -771,7 +1484,7 @@ func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -781,91 +1494,108 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x52 } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i-- - dAtA[i] = 0x4a + 
return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -874,15 +1604,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } 
-func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -892,20 +1627,20 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -917,7 +1652,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, } } { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -929,7 +1664,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -939,12 +1674,73 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.MatchResources != nil { + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -976,7 +1772,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -986,33 +1782,94 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } - if m.Service != nil { + if m.ParamKind != nil { { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1025,451 +1882,3276 @@ func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = 
uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *MutatingWebhook) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *MutatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *MutatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Rule) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if 
len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 } } - if m.Scope != nil { - l = len(*m.Scope) - n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 } - return n -} - -func (m *RuleWithOperations) Size() (n int) { - if m == nil { - return 0 + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 } - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - l = m.Rule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingWebhook) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ 
= l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 + if m.Reason != nil { + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + dAtA[offset] = uint8(v) + return base +} +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ValidatingWebhookConfiguration) Size() (n int) { +func (m *ExpressionWarning) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { +func (m *MatchCondition) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) n += 1 + l + 
sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *WebhookClientConfig) Size() (n int) { +func (m *MatchResources) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.CABundle != nil { - l = len(m.CABundle) + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.URL != nil { - l = len(*m.URL) + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) n += 1 + l + sovGenerated(uint64(l)) } return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForRules += "}" - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MutatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForWebhooks := "[]MutatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s -} -func (this *MutatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems := "[]MutatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `Scope:` + valueToStringGenerated(this.Scope) + `,`, - `}`, - }, "") - return s -} -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingWebhook) String() string { - if this == nil { - return "nil" + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForRules += "}" - s := strings.Join([]string{`&ValidatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - 
`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingWebhookConfiguration) String() string { - if this == nil { - return "nil" + +func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForWebhooks := "[]ValidatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]ValidatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + +func (m 
*ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return 
sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ReinvocationPolicy:` + 
valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + 
`ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f 
:= range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRules += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `}`, + }, "") + return s +} +func (this 
*ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, v11.RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { 
+ return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuleWithOperations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RuleWithOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex]) + m.ParameterNotFoundAction = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeChecking) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{}) + if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1492,17 +5174,17 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1512,27 +5194,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1559,13 +5242,13 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1592,47 +5275,63 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1659,18 +5358,15 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1680,115 +5376,80 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s - iNdEx = postIndex - case 10: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1798,28 +5459,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) - m.ReinvocationPolicy = &s + if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1846,10 +5507,8 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1874,7 +5533,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1897,15 +5556,47 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1932,13 +5623,16 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1965,11 +5659,45 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, MutatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1991,7 +5719,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2014,10 +5742,10 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2082,7 +5810,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) + m.Items = append(m.Items, ValidatingAdmissionPolicy{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2108,7 +5836,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } return nil } -func (m *Rule) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2131,17 +5859,17 @@ func (m *Rule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2151,29 +5879,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2183,29 +5915,33 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2215,27 +5951,29 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + m.Validations = append(m.Validations, Validation{}) + if err := m.Validations[len(m.Validations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2263,64 +6001,48 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{}) + if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2330,27 +6052,29 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2377,7 +6101,8 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2402,7 +6127,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceReference) Unmarshal(dAtA []byte) error { +func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2425,17 +6150,17 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var stringLen uint64 + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2445,29 +6170,16 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2477,62 +6189,33 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.TypeChecking == nil { + m.TypeChecking = &TypeChecking{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2542,12 +6225,26 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Port = &v + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2692,7 +6389,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) + m.Rules = append(m.Rules, v11.RuleWithOperations{}) if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2768,9 +6465,247 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 
10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2780,50 +6715,30 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - 
} - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TimeoutSeconds = &v - case 8: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2833,29 +6748,81 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2865,28 +6832,28 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - 
case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2913,10 +6880,8 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2941,7 +6906,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *Validation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2964,17 +6929,17 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: Validation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Validation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2984,30 +6949,29 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3017,25 +6981,88 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex]) + m.Reason = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.MessageExpression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3058,7 +7085,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *Variable) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3081,17 +7108,17 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: Variable: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3101,30 +7128,29 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 
7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3134,25 +7160,23 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Expression = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 2752f4fae..30f99f64d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -21,12 +21,187 @@ syntax = "proto2"; package k8s.io.api.admissionregistration.v1beta1; +import "k8s.io/api/admissionregistration/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/admissionregistration/v1beta1"; + +// AuditAnnotation describes how to produce an audit annotation for an API request. +message AuditAnnotation { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + optional string key = 1; + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. + optional string valueExpression = 2; +} + +// ExpressionWarning is a warning information that targets a specific expression. 
+message ExpressionWarning { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + optional string fieldRef = 2; + + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + optional string warning = 3; +} + +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +message MatchCondition { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + optional string name = 1; + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + optional string expression = 2; +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +message MatchResources { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + repeated NamedRuleWithOperations resourceRules = 3; + + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + repeated NamedRuleWithOperations excludeResourceRules = 4; + + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. 
+ // + // Defaults to "Equivalent" + // +optional + optional string matchPolicy = 7; +} // MutatingWebhook describes an admission webhook and the resources and operations it applies to. message MutatingWebhook { @@ -47,7 +222,8 @@ message MutatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - repeated RuleWithOperations rules = 3; + // +listType=atomic + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -115,7 +291,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -129,7 +305,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -157,6 +333,7 @@ message MutatingWebhook { // and be subject to the failure policy. // Default to `['v1beta1']`. // +optional + // +listType=atomic repeated string admissionReviewVersions = 8; // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -176,6 +353,25 @@ message MutatingWebhook { // Defaults to "Never". // +optional optional string reinvocationPolicy = 10; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 12; } // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. @@ -183,12 +379,14 @@ message MutatingWebhook { message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated MutatingWebhook Webhooks = 2; } @@ -197,67 +395,92 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. repeated MutatingWebhookConfiguration items = 2; } -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -message Rule { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +message NamedRuleWithOperations { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + repeated string resourceNames = 1; + + // RuleWithOperations is a tuple of Operations and Resources. + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +message ParamKind { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". // Required. - repeated string apiGroups = 1; + optional string apiVersion = 1; - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. + // Kind is the API kind the resources belong to. // Required. - repeated string apiVersions = 2; + optional string kind = 2; +} - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +message ParamRef { + // name is the name of the resource being referenced. // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - repeated string resources = 3; + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + optional string name = 1; - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. 
- // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. - // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. // // +optional - optional string scope = 4; -} + optional string namespace = 2; -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -message RuleWithOperations { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * - // for all of those operations and any future admission operations that are added. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string operations = 1; + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - optional Rule rule = 2; + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + optional string parameterNotFoundAction = 4; } // ServiceReference holds a reference to Service.legacy.k8s.io @@ -282,6 +505,248 @@ message ServiceReference { optional int32 port = 4; } +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +message TypeChecking { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + repeated ExpressionWarning expressionWarnings = 1; +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. 
+message ValidatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + optional ValidatingAdmissionPolicySpec spec = 2; + + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + optional ValidatingAdmissionPolicyStatus status = 3; +} + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message ValidatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + optional ValidatingAdmissionPolicyBindingSpec spec = 2; +} + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated ValidatingAdmissionPolicyBinding items = 2; +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +message ValidatingAdmissionPolicyBindingSpec { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // MatchResources declares what resources match this binding and will be validated by it. 
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + optional MatchResources matchResources = 3; + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + repeated string validationActions = 4; +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +message ValidatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. 
+ repeated ValidatingAdmissionPolicy items = 2; +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +message ValidatingAdmissionPolicySpec { + // ParamKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + optional MatchResources matchConstraints = 2; + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + repeated Validation validations = 3; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 4; + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + repeated AuditAnnotation auditAnnotations = 5; + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated Variable variables = 7; +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. +message ValidatingAdmissionPolicyStatus { + // The generation observed by the controller. + // +optional + optional int64 observedGeneration = 1; + + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + optional TypeChecking typeChecking = 2; + + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; +} + // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. message ValidatingWebhook { // The name of the admission webhook. @@ -301,7 +766,8 @@ message ValidatingWebhook { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - repeated RuleWithOperations rules = 3; + // +listType=atomic + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -369,7 +835,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -383,7 +849,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. 
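[Editorial aside, not part of the vendored file: the matchConditions and variables fields added above are plain name/expression pairs. A minimal sketch of populating them with the Go types this diff vendors follows; it assumes the v1beta1 MatchCondition type exposes Name and Expression fields (as its v1 counterpart does), and the expression strings and names are illustrative only.]

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// Skip the policy for requests made by kube-system service accounts
	// (illustrative CEL expression, not taken from kube-mgmt).
	skipKubeSystem := admissionregistrationv1beta1.MatchCondition{
		Name:       "exclude-kube-system-controllers",
		Expression: "!(request.userInfo.username.startsWith('system:serviceaccount:kube-system:'))",
	}

	// A composited variable that later validations could reference as
	// `variables.replicas`.
	replicas := admissionregistrationv1beta1.Variable{
		Name:       "replicas",
		Expression: "object.spec.replicas",
	}

	fmt.Println(skipKubeSystem.Name, replicas.Name)
}

[End of aside.]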
// Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -392,6 +858,7 @@ message ValidatingWebhook { // Requests with the dryRun attribute will be auto-rejected if they match a webhook with // sideEffects == Unknown or Some. Defaults to Unknown. // +optional + // +listType=atomic optional string sideEffects = 6; // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, @@ -411,7 +878,27 @@ message ValidatingWebhook { // and be subject to the failure policy. // Default to `['v1beta1']`. // +optional + // +listType=atomic repeated string admissionReviewVersions = 8; + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 11; } // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. @@ -419,12 +906,14 @@ message ValidatingWebhook { message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated ValidatingWebhook Webhooks = 2; } @@ -433,12 +922,103 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. repeated ValidatingWebhookConfiguration items = 2; } +// Validation specifies the CEL expression which is used to apply the validation. +message Validation { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. 
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + optional string Expression = 1; + + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + optional string message = 2; + + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. 
+ // +optional + optional string reason = 3; + + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + optional string messageExpression = 4; +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +message Variable { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + optional string Name = 1; + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + optional string Expression = 2; +} + // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go index 098744cf6..363233a2f 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ValidatingWebhookConfigurationList{}, &MutatingWebhookConfiguration{}, &MutatingWebhookConfigurationList{}, + &ValidatingAdmissionPolicy{}, + &ValidatingAdmissionPolicyList{}, + &ValidatingAdmissionPolicyBinding{}, + &ValidatingAdmissionPolicyBindingList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 630ea1f57..0f5903123 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -17,63 +17,37 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended // to make sure that all the tuple expansions are valid. -type Rule struct { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. 
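[Editorial aside, not part of the vendored file: the Validation message above carries a CEL expression plus optional message, reason, and messageExpression. A minimal sketch using only fields shown in this diff; the expression, message, and parameter name are illustrative and assume the policy is bound with a paramKind/paramRef that supplies `params.maxReplicas`.]

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reason := metav1.StatusReasonForbidden

	// One CEL validation: replica count must not exceed the limit taken
	// from the bound parameter object.
	v := admissionregistrationv1beta1.Validation{
		Expression:        "object.spec.replicas <= params.maxReplicas",
		Message:           "replica count exceeds the configured maximum",
		Reason:            &reason,
		MessageExpression: "'replica count must be at most ' + string(params.maxReplicas)",
	}

	fmt.Printf("%+v\n", v)
}

[End of aside.]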
- APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. - // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. - // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". - // - // +optional - Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` -} +type Rule = v1.Rule // ScopeType specifies a scope for a Rule. -type ScopeType string +type ScopeType = v1.ScopeType const ( // ClusterScope means that scope is limited to cluster-scoped objects. // Namespace objects are cluster-scoped. - ClusterScope ScopeType = "Cluster" + ClusterScope ScopeType = v1.ClusterScope // NamespacedScope means that scope is limited to namespaced objects. - NamespacedScope ScopeType = "Namespaced" + NamespacedScope ScopeType = v1.NamespacedScope // AllScopes means that all scopes are included. - AllScopes ScopeType = "*" + AllScopes ScopeType = v1.AllScopes +) + +// ParameterNotFoundActionType specifies a failure policy that defines how a binding +// is evaluated when the param referred by its perNamespaceParamRef is not found. +type ParameterNotFoundActionType string + +const ( + // Allow means all requests will be admitted if no param resources + // could be found. + AllowAction ParameterNotFoundActionType = "Allow" + // Deny means all requests will be denied if no param resources are found. + DenyAction ParameterNotFoundActionType = "Deny" ) // FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled. @@ -113,6 +87,584 @@ const ( SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. +type ValidatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicy. + Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy + // behaves in the expected way. + // Populated by the system. + // Read-only. + // +optional + Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy. +type ValidatingAdmissionPolicyStatus struct { + // The generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + // The results of type checking for each expression. + // Presence of this field indicates the completion of the type checking. + // +optional + TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"` + // The conditions represent the latest available observations of a policy's current state. + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` +} + +// ValidatingAdmissionPolicyConditionType is the condition type of admission validation policy. +type ValidatingAdmissionPolicyConditionType string + +// TypeChecking contains results of type checking the expressions in the +// ValidatingAdmissionPolicy +type TypeChecking struct { + // The type checking warnings for each expression. + // +optional + // +listType=atomic + ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"` +} + +// ExpressionWarning is a warning information that targets a specific expression. +type ExpressionWarning struct { + // The path to the field that refers the expression. + // For example, the reference to the expression of the first item of + // validations is "spec.validations[0].expression" + FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"` + // The content of type checking information in a human-readable form. + // Each line of the warning contains the type that the expression is checked + // against, followed by the type check error from the compiler. + Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 +// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. +type ValidatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. +type ValidatingAdmissionPolicySpec struct { + // ParamKind specifies the kind of resources used to parameterize this policy. 
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // MatchConstraints specifies what resources this policy is designed to validate. + // The AdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // Validations contain CEL expressions which is used to apply the validation. + // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is + // required. + // +listType=atomic + // +optional + Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if spec.paramKind refers to a non-existent Kind. + // A binding is invalid if spec.paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions + // define how failures are enforced. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // auditAnnotations contains CEL expressions which are used to produce audit + // annotations for the audit event of the API request. + // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is + // required. + // +listType=atomic + // +optional + AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"` + + // MatchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // Variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except MatchConditions because MatchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, Variables must be sorted by the order of first appearance and acyclic. + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` +} + +// ParamKind is a tuple of Group Kind and Version. +// +structType=atomic +type ParamKind struct { + // APIVersion is the API group version the resources belong to. + // In format of "group/version". + // Required. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"` + + // Kind is the API kind the resources belong to. + // Required. + Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"` +} + +// Validation specifies the CEL expression which is used to apply the validation. +type Validation struct { + // Expression represents the expression which will be evaluated by CEL. + // ref: https://github.com/google/cel-spec + // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. 
+ // Accessible property names are escaped according to the following rules when accessed in the expression: + // - '__' escapes to '__underscores__' + // - '.' escapes to '__dot__' + // - '-' escapes to '__dash__' + // - '/' escapes to '__slash__' + // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: + // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", + // "import", "let", "loop", "package", "namespace", "return". + // Examples: + // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"} + // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"} + // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"} + // + // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. + // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: + // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and + // non-intersecting elements in `Y` are appended, retaining their partial order. + // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values + // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with + // non-intersecting keys are appended, retaining their partial order. + // Required. + Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"` + // Message represents the message displayed when validation fails. The message is required if the Expression contains + // line breaks. The message must not contain line breaks. + // If unset, the message is "failed rule: {Rule}". + // e.g. "must be a URL with the host matching spec.host" + // If the Expression contains line breaks. Message is required. + // The message must not contain line breaks. + // If unset, the message is "failed Expression: {Expression}". + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason represents a machine-readable description of why this validation failed. + // If this is the first validation in the list to fail, this reason, as well as the + // corresponding HTTP response code, are used in the + // HTTP response to the client. + // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". + // If not set, StatusReasonInvalid is used in the response to the client. + // +optional + Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. + // Since messageExpression is used as a failure message, it must evaluate to a string. + // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. + // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced + // as if the messageExpression field were unset. 
If messageExpression evaluates to an empty string, a string with only spaces, or a string + // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and + // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. + // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. + // Example: + // "object.x must be less than max ("+string(params.max)+")" + // +optional + MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"` +} + +// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression. +// +structType=atomic +type Variable struct { + // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. + // The variable can be accessed in other expressions through `variables` + // For example, if name is "foo", the variable will be available as `variables.foo` + Name string `json:"name" protobuf:"bytes,1,opt,name=Name"` + + // Expression is the expression that will be evaluated as the value of the variable. + // The CEL expression has access to the same identifiers as the CEL expressions in Validation. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"` +} + +// AuditAnnotation describes how to produce an audit annotation for an API request. +type AuditAnnotation struct { + // key specifies the audit annotation key. The audit annotation keys of + // a ValidatingAdmissionPolicy must be unique. The key must be a qualified + // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. + // + // The key is combined with the resource name of the + // ValidatingAdmissionPolicy to construct an audit annotation key: + // "{ValidatingAdmissionPolicy name}/{key}". + // + // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy + // and the same audit annotation key, the annotation key will be identical. + // In this case, the first annotation written with the key will be included + // in the audit event and all subsequent annotations with the same key + // will be discarded. + // + // Required. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // valueExpression represents the expression which is evaluated by CEL to + // produce an audit annotation value. The expression must evaluate to either + // a string or null value. If the expression evaluates to a string, the + // audit annotation is included with the string value. If the expression + // evaluates to null or empty string the audit annotation will be omitted. + // The valueExpression may be no longer than 5kb in length. + // If the result of the valueExpression is more than 10kb in length, it + // will be truncated to 10kb. + // + // If multiple ValidatingAdmissionPolicyBinding resources match an + // API request, then the valueExpression will be evaluated for + // each binding. All unique values produced by the valueExpressions + // will be joined together in a comma-separated list. + // + // Required. 
+ ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 + +// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. +// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// +// The CEL expressions of a policy must have a computed CEL cost below the maximum +// CEL budget. Each evaluation of the policy is given an independent CEL cost budget. +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type ValidatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. + Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 + +// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. +type ValidatingAdmissionPolicyBindingSpec struct { + // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // MatchResources declares what resources match this binding and will be validated by it. 
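[Editorial aside, not part of the vendored file: a minimal sketch of a ValidatingAdmissionPolicyBinding built from the types in this hunk, showing a name-based paramRef and a Deny+Audit validationActions set (Deny and Warn may not be combined). All object and policy names are made up for illustration.]

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	deny := admissionregistrationv1beta1.DenyAction

	binding := admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "replica-limit-binding"},
		Spec: admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingSpec{
			PolicyName: "replica-limit-policy",
			ParamRef: &admissionregistrationv1beta1.ParamRef{
				Name:                    "replica-limit-params",
				Namespace:               "default",
				ParameterNotFoundAction: &deny,
			},
			ValidationActions: []admissionregistrationv1beta1.ValidationAction{
				admissionregistrationv1beta1.Deny,
				admissionregistrationv1beta1.Audit,
			},
		},
	}

	fmt.Println(binding.Name, binding.Spec.PolicyName)
}

[End of aside.]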
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. + // If this is unset, all resources matched by the policy are validated by this binding + // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. + // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` + + // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. + // If a validation evaluates to false it is always enforced according to these actions. + // + // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according + // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are + // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. + // + // validationActions is declared as a set of action values. Order does + // not matter. validationActions may not contain duplicates of the same action. + // + // The supported actions values are: + // + // "Deny" specifies that a validation failure results in a denied request. + // + // "Warn" specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + // + // "Audit" specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failures, formatted as + // a JSON list of objects, each with the following fields: + // - message: The validation failure message string + // - policy: The resource name of the ValidatingAdmissionPolicy + // - binding: The resource name of the ValidatingAdmissionPolicyBinding + // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy + // - validationActions: The enforcement actions enacted for the validation failure + // Example audit annotation: + // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"` + // + // Clients should expect to handle additional values by ignoring + // any values not recognized. + // + // "Deny" and "Warn" may not be used together since this combination + // needlessly duplicates the validation failure both in the + // API response body and the HTTP warning headers. + // + // Required. + // +listType=set + ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"` +} + +// ParamRef describes how to locate the params to be used as input to +// expressions of rules applied by a policy binding. +// +structType=atomic +type ParamRef struct { + // name is the name of the resource being referenced. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. 
+ // + // A single parameter used for all admission requests can be configured + // by setting the `name` field, leaving `selector` blank, and setting namespace + // if `paramKind` is namespace-scoped. + // + Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // namespace is the namespace of the referenced resource. Allows limiting + // the search for params to a specific namespace. Applies to both `name` and + // `selector` fields. + // + // A per-namespace parameter may be used by specifying a namespace-scoped + // `paramKind` in the policy and leaving this field empty. + // + // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this + // field results in a configuration error. + // + // - If `paramKind` is namespace-scoped, the namespace of the object being + // evaluated for admission will be used when this field is left unset. Take + // care that if this is left empty the binding must not match any cluster-scoped + // resources, which will result in an error. + // + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"` + + // selector can be used to match multiple param objects based on their labels. + // Supply selector: {} to match all resources of the ParamKind. + // + // If multiple params are found, they are all evaluated with the policy expressions + // and the results are ANDed together. + // + // One of `name` or `selector` must be set, but `name` and `selector` are + // mutually exclusive properties. If one is set, the other must be unset. + // + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"` + + // `parameterNotFoundAction` controls the behavior of the binding when the resource + // exists, and name or selector is valid, but there are no parameters + // matched by the binding. If the value is set to `Allow`, then no + // matched parameters will be treated as successful validation by the binding. + // If set to `Deny`, then no matched parameters will be subject to the + // `failurePolicy` of the policy. + // + // Allowed values are `Allow` or `Deny` + // + // Required + ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"` +} + +// MatchResources decides whether to run the admission control policy on an object based +// on whether it meets the match criteria. +// The exclude rules take precedence over include rules (if a resource matches both, it is excluded) +// +structType=atomic +type MatchResources struct { + // NamespaceSelector decides whether to run the admission control policy on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the policy. 
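[Editorial aside, not part of the vendored file: the ParamRef type just defined also supports a selector-based form, mutually exclusive with name. A brief sketch under the same assumptions as the previous aside; the label key/value are illustrative. With a selector, the policy is evaluated once per matching param object and the results are ANDed.]

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	allow := admissionregistrationv1beta1.AllowAction

	// Selector form of ParamRef: match every param object of the policy's
	// paramKind labelled policy=replica-limit; admit if none are found.
	ref := admissionregistrationv1beta1.ParamRef{
		Selector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"policy": "replica-limit"},
		},
		ParameterNotFoundAction: &allow,
	}

	fmt.Printf("%+v\n", ref)
}

[End of aside.]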
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the policy on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"` + // ObjectSelector decides whether to run the validation based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the cel validation, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"` + // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. + // The policy cares about an operation if it matches _any_ Rule. + // +listType=atomic + // +optional + ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"` + // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. + // The exclude rules take precedence over include rules (if a resource matches both, it is excluded) + // +listType=atomic + // +optional + ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"` + // matchPolicy defines how the "MatchResources" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy. + // + // Defaults to "Equivalent" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"` +} + +// ValidationAction specifies a policy enforcement action. +// +enum +type ValidationAction string + +const ( + // Deny specifies that a validation failure results in a denied request. + Deny ValidationAction = "Deny" + // Warn specifies that a validation failure is reported to the request client + // in HTTP Warning headers, with a warning code of 299. Warnings can be sent + // both for allowed or denied admission responses. + Warn ValidationAction = "Warn" + // Audit specifies that a validation failure is included in the published + // audit event for the request. The audit event will contain a + // `validation.policy.admission.k8s.io/validation_failure` audit annotation + // with a value containing the details of the validation failure. + Audit ValidationAction = "Audit" +) + +// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. +// +structType=atomic +type NamedRuleWithOperations struct { + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +listType=atomic + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"` + // RuleWithOperations is a tuple of Operations and Resources. + RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"` +} + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -132,6 +684,8 @@ type ValidatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } @@ -171,6 +725,8 @@ type MutatingWebhookConfiguration struct { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } @@ -210,6 +766,7 @@ type ValidatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -301,6 +858,7 @@ type ValidatingWebhook struct { // Requests with the dryRun attribute will be auto-rejected if they match a webhook with // sideEffects == Unknown or Some. Defaults to Unknown. // +optional + // +listType=atomic SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` // TimeoutSeconds specifies the timeout for this webhook. 
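As a sketch of how the NamedRuleWithOperations and ValidationAction types defined earlier in this hunk might be populated by a caller: the resource name and the chosen actions are hypothetical, and the embedded RuleWithOperations comes from the admissionregistration v1 package via the alias declared later in this file.

package main

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// A rule matching CREATE and UPDATE of a single Deployment by name.
var exampleRule = admissionregistrationv1beta1.NamedRuleWithOperations{
	ResourceNames: []string{"payments-api"}, // hypothetical resource name
	RuleWithOperations: admissionregistrationv1.RuleWithOperations{
		Operations: []admissionregistrationv1.OperationType{
			admissionregistrationv1.Create,
			admissionregistrationv1.Update,
		},
		Rule: admissionregistrationv1.Rule{
			APIGroups:   []string{"apps"},
			APIVersions: []string{"v1"},
			Resources:   []string{"deployments"},
		},
	},
}

// Enforcement actions a binding could request for validation failures.
var exampleActions = []admissionregistrationv1beta1.ValidationAction{
	admissionregistrationv1beta1.Deny,
	admissionregistrationv1beta1.Audit,
}

func main() {}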
After the timeout passes, @@ -320,7 +878,27 @@ type ValidatingWebhook struct { // and be subject to the failure policy. // Default to `['v1beta1']`. // +optional + // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=matchConditions"` } // MutatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -342,6 +920,7 @@ type MutatingWebhook struct { // from putting the cluster in a state which cannot be recovered from without completely // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + // +listType=atomic Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - @@ -452,6 +1031,7 @@ type MutatingWebhook struct { // and be subject to the failure policy. // Default to `['v1beta1']`. // +optional + // +listType=atomic AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. @@ -471,6 +1051,25 @@ type MutatingWebhook struct { // Defaults to "Never". // +optional ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` + + // MatchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. Match conditions filter requests that have already been matched by the rules, + // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the webhook is called. + // 3. 
If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"` } // ReinvocationPolicyType specifies what type of policy the admission hook uses. @@ -488,27 +1087,19 @@ const ( // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make // sure that all the tuple expansions are valid. -type RuleWithOperations struct { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * - // for all of those operations and any future admission operations that are added. - // If '*' is present, the length of the slice must be one. - // Required. - Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` -} +type RuleWithOperations = v1.RuleWithOperations // OperationType specifies an operation for a request. -type OperationType string +// +enum +type OperationType = v1.OperationType // The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. const ( - OperationAll OperationType = "*" - Create OperationType = "CREATE" - Update OperationType = "UPDATE" - Delete OperationType = "DELETE" - Connect OperationType = "CONNECT" + OperationAll OperationType = v1.OperationAll + Create OperationType = v1.Create + Update OperationType = v1.Update + Delete OperationType = v1.Delete + Connect OperationType = v1.Connect ) // WebhookClientConfig contains the information to make a TLS @@ -577,3 +1168,32 @@ type ServiceReference struct { // +optional Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } + +// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook. +type MatchCondition struct { + // Name is an identifier for this match condition, used for strategic merging of MatchConditions, + // as well as providing an identifier for logging purposes. A good name should be descriptive of + // the associated expression. + // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and + // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or + // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an + // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') + // + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: + // + // 'object' - The object from the incoming request. The value is null for DELETE requests. + // 'oldObject' - The existing object. The value is null for CREATE requests. + // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). + // 'authorizer' - A CEL Authorizer. 
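A minimal sketch of a MatchCondition as an author might write one; the condition name and the CEL expression are hypothetical examples of the bool-valued expressions described above.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// Skip the webhook for requests made by node identities; the expression
	// must evaluate to a bool, per the field documentation.
	cond := admissionregistrationv1beta1.MatchCondition{
		Name:       "exclude-kubelet-requests", // hypothetical condition name
		Expression: `!("system:nodes" in request.userInfo.groups)`,
	}
	fmt.Println(cond.Name, cond.Expression)
}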
May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Required. + Expression string `json:"expression" protobuf:"bytes,2,opt,name=expression"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index 314c3afae..cc1509b53 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,52 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditAnnotation = map[string]string{ + "": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", +} + +func (AuditAnnotation) SwaggerDoc() map[string]string { + return map_AuditAnnotation +} + +var map_ExpressionWarning = map[string]string{ + "": "ExpressionWarning is a warning information that targets a specific expression.", + "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "warning": "The content of type checking information in a human-readable form. 
Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", +} + +func (ExpressionWarning) SwaggerDoc() map[string]string { + return map_ExpressionWarning +} + +var map_MatchCondition = map[string]string{ + "": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", + "name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "expression": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", +} + +func (MatchCondition) SwaggerDoc() map[string]string { + return map_MatchCondition +} + +var map_MatchResources = map[string]string{ + "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. 
objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", +} + +func (MatchResources) SwaggerDoc() map[string]string { + return map_MatchResources +} + var map_MutatingWebhook = map[string]string{ "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -40,6 +83,7 @@ var map_MutatingWebhook = map[string]string{ "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", } func (MutatingWebhook) SwaggerDoc() map[string]string { @@ -66,25 +110,35 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_MutatingWebhookConfigurationList } -var map_Rule = map[string]string{ - "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", - "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", - "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", - "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", - "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", +var map_NamedRuleWithOperations = map[string]string{ + "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. 
An empty set means that everything is allowed.", } -func (Rule) SwaggerDoc() map[string]string { - return map_Rule +func (NamedRuleWithOperations) SwaggerDoc() map[string]string { + return map_NamedRuleWithOperations } -var map_RuleWithOperations = map[string]string{ - "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", - "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", +var map_ParamKind = map[string]string{ + "": "ParamKind is a tuple of Group Kind and Version.", + "apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "kind": "Kind is the API kind the resources belong to. Required.", } -func (RuleWithOperations) SwaggerDoc() map[string]string { - return map_RuleWithOperations +func (ParamKind) SwaggerDoc() map[string]string { + return map_ParamKind +} + +var map_ParamRef = map[string]string{ + "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "name": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", + "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. 
If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", +} + +func (ParamRef) SwaggerDoc() map[string]string { + return map_ParamRef } var map_ServiceReference = map[string]string{ @@ -99,6 +153,94 @@ func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } +var map_TypeChecking = map[string]string{ + "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "expressionWarnings": "The type checking warnings for each expression.", +} + +func (TypeChecking) SwaggerDoc() map[string]string { + return map_TypeChecking +} + +var map_ValidatingAdmissionPolicy = map[string]string{ + "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", +} + +func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicy +} + +var map_ValidatingAdmissionPolicyBinding = map[string]string{ + "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", +} + +func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBinding +} + +var map_ValidatingAdmissionPolicyBindingList = map[string]string{ + "": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingList +} + +var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{ + "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. 
If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", +} + +func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyBindingSpec +} + +var map_ValidatingAdmissionPolicyList = map[string]string{ + "": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyList +} + +var map_ValidatingAdmissionPolicySpec = map[string]string{ + "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. 
A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", +} + +func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicySpec +} + +var map_ValidatingAdmissionPolicyStatus = map[string]string{ + "": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "observedGeneration": "The generation observed by the controller.", + "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "conditions": "The conditions represent the latest available observations of a policy's current state.", +} + +func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string { + return map_ValidatingAdmissionPolicyStatus +} + var map_ValidatingWebhook = map[string]string{ "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", @@ -111,6 +253,7 @@ var map_ValidatingWebhook = map[string]string{ "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. 
Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped", } func (ValidatingWebhook) SwaggerDoc() map[string]string { @@ -137,6 +280,28 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } +var map_Validation = map[string]string{ + "": "Validation specifies the CEL expression which is used to apply the validation.", + "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. 
Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. 
Example: \"object.x must be less than max (\"+string(params.max)+\")\"", +} + +func (Validation) SwaggerDoc() map[string]string { + return map_Validation +} + +var map_Variable = map[string]string{ + "": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.", + "name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", +} + +func (Variable) SwaggerDoc() map[string]string { + return map_Variable +} + var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index 23ddb1fa2..4c10b1d11 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -22,17 +22,111 @@ limitations under the License. package v1beta1 import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation. +func (in *AuditAnnotation) DeepCopy() *AuditAnnotation { + if in == nil { + return nil + } + out := new(AuditAnnotation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning. 
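To ground the Validation and Variable documentation above, a small sketch of a validation that reuses a composited variable; the variable name, label key, and replica limit are all hypothetical.

package main

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

var (
	// A named CEL expression made available to later expressions as `variables.isExempt`.
	exemptVar = admissionregistrationv1beta1.Variable{
		Name:       "isExempt", // hypothetical variable name
		Expression: `has(object.metadata.labels) && "policy.example.com/exempt" in object.metadata.labels`,
	}

	// A validation that consults the variable and carries a static failure message.
	replicaLimit = admissionregistrationv1beta1.Validation{
		Expression: "variables.isExempt || object.spec.replicas <= 5",
		Message:    "replicas must be no more than 5 unless the exemption label is set",
	}
)

func main() {}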
+func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { + if in == nil { + return nil + } + out := new(ExpressionWarning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition. +func (in *MatchCondition) DeepCopy() *MatchCondition { + if in == nil { + return nil + } + out := new(MatchCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MatchResources) DeepCopyInto(out *MatchResources) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExcludeResourceRules != nil { + in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules + *out = make([]NamedRuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources. +func (in *MatchResources) DeepCopy() *MatchResources { + if in == nil { + return nil + } + out := new(MatchResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = *in in.ClientConfig.DeepCopyInto(&out.ClientConfig) if in.Rules != nil { in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) + *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -77,6 +171,11 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { *out = new(ReinvocationPolicyType) **out = **in } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -157,59 +256,65 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Rule) DeepCopyInto(out *Rule) { +func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { *out = *in - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIVersions != nil { - in, out := &in.APIVersions, &out.APIVersions + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames *out = make([]string, len(*in)) copy(*out, *in) } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Scope != nil { - in, out := &in.Scope, &out.Scope - *out = new(ScopeType) - **out = **in + in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations. +func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations { + if in == nil { + return nil } + out := new(NamedRuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamKind) DeepCopyInto(out *ParamKind) { + *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind. +func (in *ParamKind) DeepCopy() *ParamKind { if in == nil { return nil } - out := new(Rule) + out := new(ParamKind) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { +func (in *ParamRef) DeepCopyInto(out *ParamRef) { *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations - *out = make([]OperationType, len(*in)) - copy(*out, *in) + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ParameterNotFoundAction != nil { + in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction + *out = new(ParameterNotFoundActionType) + **out = **in } - in.Rule.DeepCopyInto(&out.Rule) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. -func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef. +func (in *ParamRef) DeepCopy() *ParamRef { if in == nil { return nil } - out := new(RuleWithOperations) + out := new(ParamRef) in.DeepCopyInto(out) return out } @@ -240,13 +345,267 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypeChecking) DeepCopyInto(out *TypeChecking) { + *out = *in + if in.ExpressionWarnings != nil { + in, out := &in.ExpressionWarnings, &out.ExpressionWarnings + *out = make([]ExpressionWarning, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking. 
+func (in *TypeChecking) DeepCopy() *TypeChecking { + if in == nil { + return nil + } + out := new(TypeChecking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy. +func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding. +func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.ValidationActions != nil { + in, out := &in.ValidationActions, &out.ValidationActions + *out = make([]ValidationAction, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec. +func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList. +func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Validations != nil { + in, out := &in.Validations, &out.Validations + *out = make([]Validation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make([]AuditAnnotation, len(*in)) + copy(*out, *in) + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec. 
+func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) { + *out = *in + if in.TypeChecking != nil { + in, out := &in.TypeChecking, &out.TypeChecking + *out = new(TypeChecking) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus. +func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus { + if in == nil { + return nil + } + out := new(ValidatingAdmissionPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = *in in.ClientConfig.DeepCopyInto(&out.ClientConfig) if in.Rules != nil { in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) + *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -286,6 +645,11 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } return } @@ -365,6 +729,43 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Validation) DeepCopyInto(out *Validation) { + *out = *in + if in.Reason != nil { + in, out := &in.Reason, &out.Reason + *out = new(v1.StatusReason) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation. +func (in *Validation) DeepCopy() *Validation { + if in == nil { + return nil + } + out := new(Validation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. +func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go index 09a92f476..c1be5122a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go @@ -73,6 +73,78 @@ func (in *MutatingWebhookConfigurationList) APILifecycleRemoved() (major, minor return 1, 22 } +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 31 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 34 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/apidiscovery/v2/doc.go b/vendor/k8s.io/api/apidiscovery/v2/doc.go new file mode 100644 index 000000000..4f3ad5f13 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=apidiscovery.k8s.io + +package v2 // import "k8s.io/api/apidiscovery/v2" diff --git a/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go b/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go new file mode 100644 index 000000000..5c37feaa2 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go @@ -0,0 +1,1742 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/apidiscovery/v2/generated.proto + +package v2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} } +func (*APIGroupDiscovery) ProtoMessage() {} +func (*APIGroupDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{0} +} +func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscovery.Merge(m, src) +} +func (m *APIGroupDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo + +func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} } +func (*APIGroupDiscoveryList) ProtoMessage() {} +func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{1} +} +func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscoveryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscoveryList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscoveryList.Merge(m, src) +} +func (m *APIGroupDiscoveryList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscoveryList) XXX_DiscardUnknown() { + 
xxx_messageInfo_APIGroupDiscoveryList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo + +func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} } +func (*APIResourceDiscovery) ProtoMessage() {} +func (*APIResourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{2} +} +func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceDiscovery.Merge(m, src) +} +func (m *APIResourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIResourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo + +func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} } +func (*APISubresourceDiscovery) ProtoMessage() {} +func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{3} +} +func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APISubresourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APISubresourceDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APISubresourceDiscovery.Merge(m, src) +} +func (m *APISubresourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APISubresourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APISubresourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo + +func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} } +func (*APIVersionDiscovery) ProtoMessage() {} +func (*APIVersionDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b7287280068d8f, []int{4} +} +func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersionDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersionDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersionDiscovery.Merge(m, src) +} +func (m *APIVersionDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIVersionDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersionDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersionDiscovery proto.InternalMessageInfo + +func init() { + proto.RegisterType((*APIGroupDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIGroupDiscovery") + proto.RegisterType((*APIGroupDiscoveryList)(nil), "k8s.io.api.apidiscovery.v2.APIGroupDiscoveryList") + proto.RegisterType((*APIResourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIResourceDiscovery") + proto.RegisterType((*APISubresourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APISubresourceDiscovery") + proto.RegisterType((*APIVersionDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIVersionDiscovery") +} + +func init() { + proto.RegisterFile("k8s.io/api/apidiscovery/v2/generated.proto", fileDescriptor_e0b7287280068d8f) +} + +var 
fileDescriptor_e0b7287280068d8f = []byte{ + // 736 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0x8d, 0x09, 0xf9, 0x48, 0x26, 0xc9, 0xd7, 0x30, 0x80, 0x6a, 0x65, 0xe1, 0xa0, 0x6c, 0x4a, + 0xab, 0x32, 0x86, 0x94, 0xa2, 0x2e, 0x9b, 0x94, 0xb6, 0x8a, 0xfa, 0x87, 0x26, 0x15, 0x8b, 0xaa, + 0x95, 0xea, 0x38, 0x83, 0xe3, 0x82, 0x7f, 0x34, 0xe3, 0x44, 0x62, 0xd7, 0x47, 0xe8, 0x13, 0xf4, + 0x79, 0xe8, 0x8e, 0x05, 0x0b, 0x56, 0x51, 0x49, 0x77, 0x7d, 0x04, 0x56, 0xd5, 0x8c, 0xc7, 0x3f, + 0x21, 0x44, 0x41, 0x5d, 0x74, 0x81, 0x84, 0xcf, 0x9c, 0x73, 0xee, 0x3d, 0xd7, 0xd7, 0x13, 0xf0, + 0xe0, 0xe8, 0x09, 0x43, 0xb6, 0xa7, 0x1b, 0xbe, 0xcd, 0xff, 0x7a, 0x36, 0x33, 0xbd, 0x21, 0xa1, + 0x27, 0xfa, 0xb0, 0xa1, 0x5b, 0xc4, 0x25, 0xd4, 0x08, 0x48, 0x0f, 0xf9, 0xd4, 0x0b, 0x3c, 0x58, + 0x0d, 0xb9, 0xc8, 0xf0, 0x6d, 0x94, 0xe6, 0xa2, 0x61, 0xa3, 0xba, 0x69, 0xd9, 0x41, 0x7f, 0xd0, + 0x45, 0xa6, 0xe7, 0xe8, 0x96, 0x67, 0x79, 0xba, 0x90, 0x74, 0x07, 0x87, 0xe2, 0x49, 0x3c, 0x88, + 0xff, 0x42, 0xab, 0xea, 0x4e, 0x52, 0xd6, 0x31, 0xcc, 0xbe, 0xed, 0xf2, 0x92, 0xfe, 0x91, 0xc5, + 0x01, 0xa6, 0x3b, 0x24, 0x30, 0xf4, 0xe1, 0xf6, 0xf5, 0x06, 0xaa, 0xfa, 0x2c, 0x15, 0x1d, 0xb8, + 0x81, 0xed, 0x90, 0x29, 0xc1, 0xee, 0x3c, 0x01, 0x33, 0xfb, 0xc4, 0x31, 0xae, 0xeb, 0xea, 0xe7, + 0x0a, 0x58, 0x6e, 0xee, 0xb7, 0x5f, 0x52, 0x6f, 0xe0, 0xef, 0x45, 0x31, 0xe1, 0x67, 0x90, 0xe7, + 0x9d, 0xf5, 0x8c, 0xc0, 0x50, 0x95, 0x75, 0x65, 0xa3, 0xd8, 0xd8, 0x42, 0xc9, 0x48, 0xe2, 0x02, + 0xc8, 0x3f, 0xb2, 0x38, 0xc0, 0x10, 0x67, 0xa3, 0xe1, 0x36, 0x7a, 0xd7, 0xfd, 0x42, 0xcc, 0xe0, + 0x0d, 0x09, 0x8c, 0x16, 0x3c, 0x1d, 0xd5, 0x32, 0xe3, 0x51, 0x0d, 0x24, 0x18, 0x8e, 0x5d, 0xe1, + 0x27, 0x90, 0x1f, 0x12, 0xca, 0x6c, 0xcf, 0x65, 0xea, 0xc2, 0x7a, 0x76, 0xa3, 0xd8, 0xd0, 0xd1, + 0xec, 0xa1, 0xa3, 0xe6, 0x7e, 0xfb, 0x20, 0xa4, 0xc7, 0x4d, 0xb6, 0x2a, 0xb2, 0x40, 0x5e, 0x9e, + 0x30, 0x1c, 0x5b, 0xd6, 0x7f, 0x28, 0x60, 0x6d, 0x2a, 0xd6, 0x6b, 0x9b, 0x05, 0xf0, 0xe3, 0x54, + 0x34, 0x74, 0xbb, 0x68, 0x5c, 0x2d, 0x82, 0xc5, 0x75, 0x23, 0x24, 0x15, 0x0b, 0x83, 0x9c, 0x1d, + 0x10, 0x27, 0xca, 0xb4, 0x39, 0x27, 0xd3, 0x64, 0x7f, 0xad, 0xb2, 0x74, 0xce, 0xb5, 0xb9, 0x07, + 0x0e, 0xad, 0xea, 0xdf, 0x17, 0xc1, 0x6a, 0x73, 0xbf, 0x8d, 0x09, 0xf3, 0x06, 0xd4, 0x24, 0xc9, + 0x5b, 0x7a, 0x08, 0xf2, 0x54, 0x82, 0x22, 0x4a, 0x21, 0x69, 0x2d, 0x22, 0xe3, 0x98, 0x01, 0x8f, + 0x41, 0x89, 0x12, 0xe6, 0x7b, 0x2e, 0x23, 0xaf, 0x6c, 0xb7, 0xa7, 0x2e, 0x88, 0xf0, 0xbb, 0xb7, + 0x0b, 0x2f, 0x1a, 0x95, 0x73, 0xe6, 0xea, 0x56, 0x65, 0x3c, 0xaa, 0x95, 0x70, 0xca, 0x0f, 0x4f, + 0xb8, 0xc3, 0x1d, 0x90, 0x63, 0xa6, 0xe7, 0x13, 0x35, 0x2b, 0x1a, 0xd3, 0xa2, 0x64, 0x1d, 0x0e, + 0x5e, 0x8d, 0x6a, 0xe5, 0xa8, 0x43, 0x01, 0xe0, 0x90, 0x0c, 0xf7, 0x40, 0x85, 0xd9, 0xae, 0x35, + 0x38, 0x36, 0x68, 0x74, 0xae, 0x2e, 0x0a, 0x03, 0x55, 0x1a, 0x54, 0x3a, 0xd7, 0xce, 0xf1, 0x94, + 0x02, 0xd6, 0x40, 0x6e, 0x48, 0x68, 0x97, 0xa9, 0xb9, 0xf5, 0xec, 0x46, 0xa1, 0x55, 0xe0, 0x75, + 0x0f, 0x38, 0x80, 0x43, 0x1c, 0x22, 0x00, 0x58, 0xdf, 0xa3, 0xc1, 0x5b, 0xc3, 0x21, 0x4c, 0xfd, + 0x4f, 0xb0, 0xfe, 0xe7, 0xab, 0xda, 0x89, 0x51, 0x9c, 0x62, 0x70, 0xbe, 0x69, 0x04, 0xc4, 0xf2, + 0xa8, 0x4d, 0x98, 0xba, 0x94, 0xf0, 0x9f, 0xc5, 0x28, 0x4e, 0x31, 0xa0, 0x03, 0x4a, 0x6c, 0xd0, + 0x8d, 0x26, 0xcf, 0xd4, 0xbc, 0x58, 0x86, 0x47, 0x73, 0x96, 0xa1, 0x93, 0x48, 0x92, 0x95, 0x58, + 0x95, 0xb9, 0x4b, 0xa9, 0x53, 0x86, 0x27, 0xec, 0xeb, 0xe7, 0x0b, 0xe0, 0xee, 0x0c, 0x3d, 0x7c, + 0x0c, 0x8a, 0x29, 0xae, 0x5c, 
0x93, 0x15, 0x69, 0x5a, 0x4c, 0x49, 0x70, 0x9a, 0xf7, 0x8f, 0x97, + 0x85, 0x81, 0xb2, 0x61, 0x9a, 0xc4, 0x0f, 0x48, 0xef, 0xfd, 0x89, 0x4f, 0x98, 0x9a, 0x15, 0x03, + 0xfb, 0xdb, 0x72, 0x6b, 0x32, 0x5e, 0xb9, 0x99, 0x36, 0xc5, 0x93, 0x35, 0x92, 0x2d, 0x59, 0xbc, + 0x79, 0x4b, 0xea, 0xbf, 0x15, 0xb0, 0x72, 0xc3, 0xbd, 0x03, 0xef, 0x83, 0x25, 0x79, 0xcf, 0xc8, + 0x71, 0xde, 0x91, 0xf5, 0x96, 0x24, 0x15, 0x47, 0xe7, 0xd0, 0x00, 0x85, 0x64, 0x0b, 0xc2, 0x2b, + 0x61, 0x6b, 0xce, 0x16, 0x4c, 0x7d, 0xe6, 0xad, 0x65, 0x69, 0x5f, 0xc0, 0xf1, 0xfb, 0x4f, 0x5c, + 0xe1, 0x73, 0x50, 0x38, 0xa4, 0x84, 0xf5, 0x5d, 0xc2, 0x98, 0xfc, 0xd8, 0xee, 0x45, 0x82, 0x17, + 0xd1, 0xc1, 0xd5, 0xa8, 0x06, 0x63, 0xc3, 0x18, 0xc5, 0x89, 0xb2, 0xf5, 0xf4, 0xf4, 0x52, 0xcb, + 0x9c, 0x5d, 0x6a, 0x99, 0x8b, 0x4b, 0x2d, 0xf3, 0x75, 0xac, 0x29, 0xa7, 0x63, 0x4d, 0x39, 0x1b, + 0x6b, 0xca, 0xc5, 0x58, 0x53, 0x7e, 0x8e, 0x35, 0xe5, 0xdb, 0x2f, 0x2d, 0xf3, 0xa1, 0x3a, 0xfb, + 0x37, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x35, 0x6a, 0x0f, 0x60, 0x07, 0x00, 0x00, +} + +func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIGroupDiscoveryList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscoveryList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscoveryList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIResourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.Subresources) > 0 { + for iNdEx := len(m.Subresources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subresources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.SingularResource) + copy(dAtA[i:], m.SingularResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularResource))) + i-- + dAtA[i] = 0x22 + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x1a + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APISubresourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APISubresourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APISubresourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.AcceptedTypes) > 0 { + for iNdEx := len(m.AcceptedTypes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AcceptedTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIVersionDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIVersionDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} 
+ +func (m *APIVersionDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Freshness) + copy(dAtA[i:], m.Freshness) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Freshness))) + i-- + dAtA[i] = 0x1a + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APIGroupDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupDiscoveryList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResourceDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil { + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SingularResource) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Subresources) > 0 { + for _, e := range m.Subresources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APISubresourceDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil { + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AcceptedTypes) > 0 { + for _, e := range m.AcceptedTypes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersionDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Freshness) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this 
*APIGroupDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]APIVersionDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "APIVersionDiscovery", "APIVersionDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&APIGroupDiscovery{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupDiscoveryList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]APIGroupDiscovery{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIGroupDiscovery", "APIGroupDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&APIGroupDiscoveryList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubresources := "[]APISubresourceDiscovery{" + for _, f := range this.Subresources { + repeatedStringForSubresources += strings.Replace(strings.Replace(f.String(), "APISubresourceDiscovery", "APISubresourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForSubresources += "}" + s := strings.Join([]string{`&APIResourceDiscovery{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `ResponseKind:` + strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `SingularResource:` + fmt.Sprintf("%v", this.SingularResource) + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Subresources:` + repeatedStringForSubresources + `,`, + `}`, + }, "") + return s +} +func (this *APISubresourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForAcceptedTypes := "[]GroupVersionKind{" + for _, f := range this.AcceptedTypes { + repeatedStringForAcceptedTypes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAcceptedTypes += "}" + s := strings.Join([]string{`&APISubresourceDiscovery{`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `ResponseKind:` + strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `AcceptedTypes:` + repeatedStringForAcceptedTypes + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `}`, + }, "") + return s +} +func (this *APIVersionDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]APIResourceDiscovery{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "APIResourceDiscovery", "APIResourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&APIVersionDiscovery{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Freshness:` + fmt.Sprintf("%v", this.Freshness) + `,`, + `}`, + }, "") + return s +} +func 
valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroupDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, APIVersionDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupDiscoveryList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscoveryList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscoveryList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, APIGroupDiscovery{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresources = append(m.Subresources, APISubresourceDiscovery{}) + if err := m.Subresources[len(m.Subresources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APISubresourceDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APISubresourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APISubresourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field AcceptedTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AcceptedTypes = append(m.AcceptedTypes, v1.GroupVersionKind{}) + if err := m.AcceptedTypes[len(m.AcceptedTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersionDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersionDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersionDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, APIResourceDiscovery{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Freshness", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Freshness = DiscoveryFreshness(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apidiscovery/v2/generated.proto b/vendor/k8s.io/api/apidiscovery/v2/generated.proto new file mode 100644 index 000000000..62f2d7f2c --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/generated.proto @@ -0,0 +1,156 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.apidiscovery.v2; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/apidiscovery/v2"; + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. +// Versions are in descending order of preference, with the first version being the preferred entry. +message APIGroupDiscovery { + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // versions are the versions supported in this group. They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + repeated APIVersionDiscovery versions = 2; +} + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +message APIGroupDiscoveryList { + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of groups for discovery. The groups are listed in priority order. + repeated APIGroupDiscovery items = 2; +} + +// APIResourceDiscovery provides information about an API resource for discovery. +message APIResourceDiscovery { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/// + // Resources with empty groups are located at /api/v1/ + optional string resource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. 
+ // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // scope indicates the scope of a resource, either Cluster or Namespaced + optional string scope = 3; + + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence. + // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + optional string singularResource = 4; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + repeated string verbs = 5; + + // shortNames is a list of suggested short names of the resource. + // +listType=set + repeated string shortNames = 6; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + repeated string categories = 7; + + // subresources is a list of subresources provided by this resource. Subresources are located at /apis////name-of-instance/ + // +listType=map + // +listMapKey=subresource + repeated APISubresourceDiscovery subresources = 8; +} + +// APISubresourceDiscovery provides information about an API subresource for discovery. +message APISubresourceDiscovery { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + optional string subresource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // acceptedTypes describes the kinds that this endpoint accepts. + // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions. + // +listType=set + repeated string verbs = 4; +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +message APIVersionDiscovery { + // version is the name of the version within a group version. + optional string version = 1; + + // resources is a list of APIResourceDiscovery objects for the corresponding group version. 
+ // +listType=map + // +listMapKey=resource + repeated APIResourceDiscovery resources = 2; + + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + optional string freshness = 3; +} + diff --git a/vendor/k8s.io/api/apidiscovery/v2/register.go b/vendor/k8s.io/api/apidiscovery/v2/register.go new file mode 100644 index 000000000..dd759defc --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "apidiscovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &APIGroupDiscoveryList{}, + &APIGroupDiscovery{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apidiscovery/v2/types.go b/vendor/k8s.io/api/apidiscovery/v2/types.go new file mode 100644 index 000000000..449679b61 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/types.go @@ -0,0 +1,157 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
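[Editor's note, illustrative only; not part of the vendored diff.] register.go above wires the new apidiscovery.k8s.io/v2 kinds into a runtime.Scheme via AddToScheme. A minimal sketch of how a consumer might use that registration, assuming only the vendored k8s.io/api and k8s.io/apimachinery packages present in this dependency bump:

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers APIGroupDiscovery and APIGroupDiscoveryList under
	// apidiscovery.k8s.io/v2 (see addKnownTypes above).
	if err := apidiscoveryv2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := apidiscoveryv2.SchemeGroupVersion.WithKind("APIGroupDiscoveryList")
	fmt.Println("recognized:", scheme.Recognizes(gvk))
}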
+*/ + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +type APIGroupDiscoveryList struct { + v1.TypeMeta `json:",inline"` + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of groups for discovery. The groups are listed in priority order. + Items []APIGroupDiscovery `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. +// Versions are in descending order of preference, with the first version being the preferred entry. +type APIGroupDiscovery struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // versions are the versions supported in this group. They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + Versions []APIVersionDiscovery `json:"versions,omitempty" protobuf:"bytes,2,rep,name=versions"` +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +type APIVersionDiscovery struct { + // version is the name of the version within a group version. + Version string `json:"version" protobuf:"bytes,1,opt,name=version"` + // resources is a list of APIResourceDiscovery objects for the corresponding group version. + // +listType=map + // +listMapKey=resource + Resources []APIResourceDiscovery `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. 
Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + Freshness DiscoveryFreshness `json:"freshness,omitempty" protobuf:"bytes,3,opt,name=freshness"` +} + +// APIResourceDiscovery provides information about an API resource for discovery. +type APIResourceDiscovery struct { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/// + // Resources with empty groups are located at /api/v1/ + Resource string `json:"resource" protobuf:"bytes,1,opt,name=resource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // scope indicates the scope of a resource, either Cluster or Namespaced + Scope ResourceScope `json:"scope" protobuf:"bytes,3,opt,name=scope"` + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence. + // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + SingularResource string `json:"singularResource" protobuf:"bytes,4,opt,name=singularResource"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,5,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + // +listType=set + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,6,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // subresources is a list of subresources provided by this resource. Subresources are located at /apis////name-of-instance/ + // +listType=map + // +listMapKey=subresource + Subresources []APISubresourceDiscovery `json:"subresources,omitempty" protobuf:"bytes,8,rep,name=subresources"` +} + +// ResourceScope is an enum defining the different scopes available to a resource. +type ResourceScope string + +const ( + ScopeCluster ResourceScope = "Cluster" + ScopeNamespace ResourceScope = "Namespaced" +) + +// DiscoveryFreshness is an enum defining whether the Discovery document published by an apiservice is up to date (fresh). 
+type DiscoveryFreshness string + +const ( + DiscoveryFreshnessCurrent DiscoveryFreshness = "Current" + DiscoveryFreshnessStale DiscoveryFreshness = "Stale" +) + +// APISubresourceDiscovery provides information about an API subresource for discovery. +type APISubresourceDiscovery struct { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + Subresource string `json:"subresource" protobuf:"bytes,1,opt,name=subresource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // acceptedTypes describes the kinds that this endpoint accepts. + // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + AcceptedTypes []v1.GroupVersionKind `json:"acceptedTypes,omitempty" protobuf:"bytes,3,rep,name=acceptedTypes"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions. + // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` +} diff --git a/vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go new file mode 100644 index 000000000..029aeeab8 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go @@ -0,0 +1,190 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscovery) DeepCopyInto(out *APIGroupDiscovery) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersionDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscovery. 
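[Editor's note, illustrative only; not part of the vendored diff.] The types above model the aggregated discovery document: a group holds versions in preference order, each version lists resources with their verbs, scope, and subresources, and carries a freshness marker. A hedged sketch of how a client might walk one group, using only fields defined in this file:

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
)

// printGroup lists every served resource in a group, skipping stale version
// documents as the freshness comment above recommends for clients that need
// current data.
func printGroup(g apidiscoveryv2.APIGroupDiscovery) {
	for _, ver := range g.Versions {
		if ver.Freshness == apidiscoveryv2.DiscoveryFreshnessStale {
			continue
		}
		for _, res := range ver.Resources {
			fmt.Printf("%s/%s %s (%s) verbs=%v\n",
				g.Name, ver.Version, res.Resource, res.Scope, res.Verbs)
		}
	}
}

func main() {
	printGroup(apidiscoveryv2.APIGroupDiscovery{})
}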
+func (in *APIGroupDiscovery) DeepCopy() *APIGroupDiscovery { + if in == nil { + return nil + } + out := new(APIGroupDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscovery) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscoveryList) DeepCopyInto(out *APIGroupDiscoveryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIGroupDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscoveryList. +func (in *APIGroupDiscoveryList) DeepCopy() *APIGroupDiscoveryList { + if in == nil { + return nil + } + out := new(APIGroupDiscoveryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscoveryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceDiscovery) DeepCopyInto(out *APIResourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = make([]APISubresourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceDiscovery. +func (in *APIResourceDiscovery) DeepCopy() *APIResourceDiscovery { + if in == nil { + return nil + } + out := new(APIResourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISubresourceDiscovery) DeepCopyInto(out *APISubresourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.AcceptedTypes != nil { + in, out := &in.AcceptedTypes, &out.AcceptedTypes + *out = make([]v1.GroupVersionKind, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISubresourceDiscovery. 
+func (in *APISubresourceDiscovery) DeepCopy() *APISubresourceDiscovery { + if in == nil { + return nil + } + out := new(APISubresourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIVersionDiscovery) DeepCopyInto(out *APIVersionDiscovery) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersionDiscovery. +func (in *APIVersionDiscovery) DeepCopy() *APIVersionDiscovery { + if in == nil { + return nil + } + out := new(APIVersionDiscovery) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b7132c647 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go similarity index 68% rename from vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go rename to vendor/k8s.io/api/apidiscovery/v2beta1/doc.go index 19ab77614..e85da226e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,10 +15,10 @@ limitations under the License. 
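[Editor's note, illustrative only; not part of the vendored diff.] The generated prerelease-lifecycle hooks simply report the Kubernetes release that introduced each kind, driven by the +k8s:prerelease-lifecycle-gen:introduced=1.30 tags in types.go. For example:

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
)

func main() {
	var g apidiscoveryv2.APIGroupDiscovery
	major, minor := g.APILifecycleIntroduced()
	fmt.Printf("APIGroupDiscovery introduced in %d.%d\n", major, minor) // 1.30
}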
*/ // +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta +// +k8s:prerelease-lifecycle-gen=true -// +groupName=client.authentication.k8s.io +// +groupName=apidiscovery.k8s.io -package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" +package v2beta1 // import "k8s.io/api/apidiscovery/v2beta1" diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go new file mode 100644 index 000000000..398c5f94f --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go @@ -0,0 +1,1743 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/apidiscovery/v2beta1/generated.proto + +package v2beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} } +func (*APIGroupDiscovery) ProtoMessage() {} +func (*APIGroupDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_48661e6ba3d554f3, []int{0} +} +func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscovery.Merge(m, src) +} +func (m *APIGroupDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo + +func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} } +func (*APIGroupDiscoveryList) ProtoMessage() {} +func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) { + return fileDescriptor_48661e6ba3d554f3, []int{1} +} +func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupDiscoveryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupDiscoveryList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupDiscoveryList.Merge(m, src) +} +func (m *APIGroupDiscoveryList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupDiscoveryList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupDiscoveryList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo + +func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} } +func (*APIResourceDiscovery) ProtoMessage() {} +func (*APIResourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_48661e6ba3d554f3, []int{2} +} +func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceDiscovery.Merge(m, src) +} +func (m *APIResourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIResourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo + +func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} } +func (*APISubresourceDiscovery) ProtoMessage() {} +func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_48661e6ba3d554f3, []int{3} +} +func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APISubresourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APISubresourceDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APISubresourceDiscovery.Merge(m, src) +} +func (m *APISubresourceDiscovery) XXX_Size() int { + return m.Size() +} +func (m 
*APISubresourceDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APISubresourceDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo + +func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} } +func (*APIVersionDiscovery) ProtoMessage() {} +func (*APIVersionDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_48661e6ba3d554f3, []int{4} +} +func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersionDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersionDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersionDiscovery.Merge(m, src) +} +func (m *APIVersionDiscovery) XXX_Size() int { + return m.Size() +} +func (m *APIVersionDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersionDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersionDiscovery proto.InternalMessageInfo + +func init() { + proto.RegisterType((*APIGroupDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIGroupDiscovery") + proto.RegisterType((*APIGroupDiscoveryList)(nil), "k8s.io.api.apidiscovery.v2beta1.APIGroupDiscoveryList") + proto.RegisterType((*APIResourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIResourceDiscovery") + proto.RegisterType((*APISubresourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APISubresourceDiscovery") + proto.RegisterType((*APIVersionDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIVersionDiscovery") +} + +func init() { + proto.RegisterFile("k8s.io/api/apidiscovery/v2beta1/generated.proto", fileDescriptor_48661e6ba3d554f3) +} + +var fileDescriptor_48661e6ba3d554f3 = []byte{ + // 740 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4a, + 0x18, 0x8d, 0x09, 0xb9, 0x24, 0x93, 0xe4, 0xde, 0x30, 0x80, 0xae, 0xc5, 0xc2, 0x46, 0xd9, 0x5c, + 0xae, 0xd4, 0x8e, 0x4b, 0x04, 0x88, 0x6d, 0x52, 0x68, 0x15, 0xf5, 0x0f, 0x4d, 0x2a, 0x2a, 0x55, + 0x5d, 0xd4, 0x71, 0x06, 0xc7, 0x85, 0xd8, 0xd6, 0xcc, 0x24, 0x12, 0xbb, 0x3e, 0x42, 0xdf, 0xa1, + 0x2f, 0xc3, 0xaa, 0x62, 0xd1, 0x05, 0xdd, 0x44, 0x25, 0x7d, 0x80, 0xee, 0x59, 0x55, 0x33, 0x1e, + 0xff, 0x84, 0x80, 0x88, 0xba, 0xe8, 0x22, 0x52, 0x7c, 0xe6, 0x9c, 0xf3, 0x7d, 0xe7, 0xcb, 0xe7, + 0x09, 0xb0, 0x4e, 0xf6, 0x18, 0xf2, 0x02, 0xcb, 0x0e, 0x3d, 0xf1, 0xe9, 0x79, 0xcc, 0x09, 0x46, + 0x84, 0x9e, 0x59, 0xa3, 0x46, 0x97, 0x70, 0x7b, 0xcb, 0x72, 0x89, 0x4f, 0xa8, 0xcd, 0x49, 0x0f, + 0x85, 0x34, 0xe0, 0x01, 0x34, 0x23, 0x01, 0xb2, 0x43, 0x0f, 0x65, 0x05, 0x48, 0x09, 0xd6, 0x1f, + 0xba, 0x1e, 0xef, 0x0f, 0xbb, 0xc8, 0x09, 0x06, 0x96, 0x1b, 0xb8, 0x81, 0x25, 0x75, 0xdd, 0xe1, + 0xb1, 0x7c, 0x92, 0x0f, 0xf2, 0x5b, 0xe4, 0xb7, 0xbe, 0x9d, 0x36, 0x30, 0xb0, 0x9d, 0xbe, 0xe7, + 0x8b, 0xe2, 0xe1, 0x89, 0x2b, 0x00, 0x66, 0x0d, 0x08, 0xb7, 0xad, 0xd1, 0x4c, 0x17, 0xeb, 0xd6, + 0x5d, 0x2a, 0x3a, 0xf4, 0xb9, 0x37, 0x20, 0x33, 0x82, 0xdd, 0xfb, 0x04, 0xcc, 0xe9, 0x93, 0x81, + 0x7d, 0x53, 0x57, 0xff, 0xa6, 0x81, 0xe5, 0xe6, 0x61, 0xfb, 0x29, 0x0d, 0x86, 0xe1, 0x7e, 0x9c, + 0x15, 0xbe, 0x07, 0x45, 0xd1, 0x59, 0xcf, 0xe6, 0xb6, 0xae, 0x6d, 0x68, 0x9b, 0xe5, 0xc6, 0x23, + 0x94, 0xce, 0x25, 0x29, 0x80, 0xc2, 0x13, 0x57, 0x00, 0x0c, 0x09, 0x36, 0x1a, 0x6d, 0xa1, 0x57, + 0xdd, 0x0f, 0xc4, 0xe1, 0x2f, 0x08, 0xb7, 0x5b, 0xf0, 0x7c, 0x6c, 0xe6, 0x26, 0x63, 0x13, 0xa4, + 0x18, 0x4e, 0x5c, 0x61, 
0x17, 0x14, 0x47, 0x84, 0x32, 0x2f, 0xf0, 0x99, 0xbe, 0xb0, 0x91, 0xdf, + 0x2c, 0x37, 0xb6, 0xd1, 0x3d, 0x93, 0x47, 0xcd, 0xc3, 0xf6, 0x51, 0xa4, 0x49, 0x3a, 0x6d, 0xd5, + 0x54, 0x95, 0xa2, 0x3a, 0x61, 0x38, 0xf1, 0xad, 0x7f, 0xd1, 0xc0, 0xda, 0x4c, 0xb6, 0xe7, 0x1e, + 0xe3, 0xf0, 0xdd, 0x4c, 0x3e, 0x34, 0x5f, 0x3e, 0xa1, 0x96, 0xe9, 0x92, 0xba, 0x31, 0x92, 0xc9, + 0xf6, 0x06, 0x14, 0x3c, 0x4e, 0x06, 0x71, 0xb0, 0xc6, 0x3c, 0xc1, 0xa6, 0x9b, 0x6c, 0x55, 0x95, + 0x7d, 0xa1, 0x2d, 0x8c, 0x70, 0xe4, 0x57, 0xff, 0xbc, 0x08, 0x56, 0x9b, 0x87, 0x6d, 0x4c, 0x58, + 0x30, 0xa4, 0x0e, 0x49, 0x7f, 0xaf, 0x07, 0xa0, 0x48, 0x15, 0x28, 0xf3, 0x94, 0xd2, 0xfe, 0x62, + 0x32, 0x4e, 0x18, 0xf0, 0x14, 0x54, 0x28, 0x61, 0x61, 0xe0, 0x33, 0xf2, 0xcc, 0xf3, 0x7b, 0xfa, + 0x82, 0x9c, 0xc0, 0xee, 0x7c, 0x13, 0x90, 0x8d, 0xaa, 0x61, 0x0b, 0x75, 0xab, 0x36, 0x19, 0x9b, + 0x15, 0x9c, 0xf1, 0xc3, 0x53, 0xee, 0x70, 0x1b, 0x14, 0x98, 0x13, 0x84, 0x44, 0xcf, 0xcb, 0xc6, + 0x8c, 0x38, 0x59, 0x47, 0x80, 0xd7, 0x63, 0xb3, 0x1a, 0x77, 0x28, 0x01, 0x1c, 0x91, 0xe1, 0x3e, + 0xa8, 0x31, 0xcf, 0x77, 0x87, 0xa7, 0x36, 0x8d, 0xcf, 0xf5, 0x45, 0x69, 0xa0, 0x2b, 0x83, 0x5a, + 0xe7, 0xc6, 0x39, 0x9e, 0x51, 0x40, 0x13, 0x14, 0x46, 0x84, 0x76, 0x99, 0x5e, 0xd8, 0xc8, 0x6f, + 0x96, 0x5a, 0x25, 0x51, 0xf7, 0x48, 0x00, 0x38, 0xc2, 0x21, 0x02, 0x80, 0xf5, 0x03, 0xca, 0x5f, + 0xda, 0x03, 0xc2, 0xf4, 0xbf, 0x24, 0xeb, 0x6f, 0xb1, 0xb4, 0x9d, 0x04, 0xc5, 0x19, 0x86, 0xe0, + 0x3b, 0x36, 0x27, 0x6e, 0x40, 0x3d, 0xc2, 0xf4, 0xa5, 0x94, 0xff, 0x38, 0x41, 0x71, 0x86, 0x01, + 0x29, 0xa8, 0xb0, 0x61, 0x37, 0x9e, 0x3c, 0xd3, 0x8b, 0x72, 0x23, 0xf6, 0xe6, 0xd9, 0x88, 0x4e, + 0xaa, 0x4b, 0xf7, 0x62, 0x55, 0x85, 0xaf, 0x64, 0x4e, 0x19, 0x9e, 0xaa, 0x51, 0xff, 0xba, 0x00, + 0xfe, 0xbd, 0x43, 0x0f, 0x77, 0x40, 0x39, 0xc3, 0x55, 0xbb, 0xb2, 0xa2, 0x4c, 0xcb, 0x19, 0x09, + 0xce, 0xf2, 0xfe, 0xf0, 0xc6, 0x30, 0x50, 0xb5, 0x1d, 0x87, 0x84, 0x9c, 0xf4, 0x5e, 0x9f, 0x85, + 0x84, 0xe9, 0x79, 0x39, 0xb5, 0xdf, 0x2d, 0xb7, 0xa6, 0xe2, 0x55, 0x9b, 0x59, 0x53, 0x3c, 0x5d, + 0x23, 0x5d, 0x95, 0xc5, 0xdb, 0x57, 0xa5, 0xfe, 0x53, 0x03, 0x2b, 0xb7, 0xdc, 0x40, 0xf0, 0x7f, + 0xb0, 0xa4, 0x6e, 0x1c, 0x35, 0xce, 0x7f, 0x54, 0xbd, 0x25, 0x45, 0xc5, 0xf1, 0x39, 0x3c, 0x06, + 0xa5, 0x74, 0x15, 0xa2, 0xcb, 0x61, 0x67, 0x9e, 0x55, 0x98, 0x79, 0xe1, 0x5b, 0xcb, 0xaa, 0x46, + 0x09, 0x27, 0x4b, 0x90, 0x5a, 0xc3, 0x03, 0x50, 0x3a, 0xa6, 0x84, 0xf5, 0x7d, 0xc2, 0x98, 0x7a, + 0xed, 0xfe, 0x8b, 0x05, 0x4f, 0xe2, 0x83, 0xeb, 0xb1, 0x09, 0x13, 0xc3, 0x04, 0xc5, 0xa9, 0xb2, + 0x75, 0x70, 0x7e, 0x65, 0xe4, 0x2e, 0xae, 0x8c, 0xdc, 0xe5, 0x95, 0x91, 0xfb, 0x38, 0x31, 0xb4, + 0xf3, 0x89, 0xa1, 0x5d, 0x4c, 0x0c, 0xed, 0x72, 0x62, 0x68, 0xdf, 0x27, 0x86, 0xf6, 0xe9, 0x87, + 0x91, 0x7b, 0x6b, 0xde, 0xf3, 0x0f, 0xfb, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x85, 0x3b, 0x06, + 0x83, 0x07, 0x00, 0x00, +} + +func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIGroupDiscoveryList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupDiscoveryList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupDiscoveryList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIResourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Subresources) > 0 { + for iNdEx := len(m.Subresources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subresources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.ShortNames) > 0 { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.SingularResource) + copy(dAtA[i:], m.SingularResource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularResource))) + i-- + dAtA[i] = 0x22 + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0x1a + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*APISubresourceDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APISubresourceDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APISubresourceDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.AcceptedTypes) > 0 { + for iNdEx := len(m.AcceptedTypes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AcceptedTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ResponseKind != nil { + { + size, err := m.ResponseKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIVersionDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIVersionDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersionDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Freshness) + copy(dAtA[i:], m.Freshness) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Freshness))) + i-- + dAtA[i] = 0x1a + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APIGroupDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupDiscoveryList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResourceDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil 
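[Editor's note, illustrative only; not part of the vendored diff.] The generated MarshalToSizedBuffer functions fill the buffer from the end toward the start: each field's payload is written first, then its length varint and tag byte are prepended, which is why the index i only ever decreases. A tiny standalone sketch of the same idea for one length-delimited field, assuming nothing beyond the standard library:

package main

import "fmt"

// prependField writes payload at the end of buf's free space, then prepends
// its length and field tag, mirroring the generated code above.
// Payloads here are assumed to be shorter than 128 bytes so the length fits
// in a single varint byte.
func prependField(buf []byte, i int, tag byte, payload []byte) int {
	i -= len(payload)
	copy(buf[i:], payload)
	i--
	buf[i] = byte(len(payload))
	i--
	buf[i] = tag
	return i
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)
	i = prependField(buf, i, 0x0a, []byte("v2")) // field 1, wire type 2
	fmt.Printf("% x\n", buf[i:])                 // 0a 02 76 32
}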
{ + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SingularResource) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Subresources) > 0 { + for _, e := range m.Subresources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APISubresourceDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResponseKind != nil { + l = m.ResponseKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AcceptedTypes) > 0 { + for _, e := range m.AcceptedTypes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersionDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Freshness) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroupDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]APIVersionDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "APIVersionDiscovery", "APIVersionDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + s := strings.Join([]string{`&APIGroupDiscovery{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupDiscoveryList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]APIGroupDiscovery{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIGroupDiscovery", "APIGroupDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&APIGroupDiscoveryList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubresources := "[]APISubresourceDiscovery{" + for _, f := range this.Subresources { + repeatedStringForSubresources += strings.Replace(strings.Replace(f.String(), "APISubresourceDiscovery", "APISubresourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForSubresources += "}" + s := strings.Join([]string{`&APIResourceDiscovery{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `ResponseKind:` + 
strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, + `SingularResource:` + fmt.Sprintf("%v", this.SingularResource) + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Subresources:` + repeatedStringForSubresources + `,`, + `}`, + }, "") + return s +} +func (this *APISubresourceDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForAcceptedTypes := "[]GroupVersionKind{" + for _, f := range this.AcceptedTypes { + repeatedStringForAcceptedTypes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAcceptedTypes += "}" + s := strings.Join([]string{`&APISubresourceDiscovery{`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `ResponseKind:` + strings.Replace(fmt.Sprintf("%v", this.ResponseKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, + `AcceptedTypes:` + repeatedStringForAcceptedTypes + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `}`, + }, "") + return s +} +func (this *APIVersionDiscovery) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]APIResourceDiscovery{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "APIResourceDiscovery", "APIResourceDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&APIVersionDiscovery{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Freshness:` + fmt.Sprintf("%v", this.Freshness) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroupDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, APIVersionDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupDiscoveryList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupDiscoveryList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupDiscoveryList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, APIGroupDiscovery{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = ResourceScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresources = append(m.Subresources, APISubresourceDiscovery{}) + if err := m.Subresources[len(m.Subresources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APISubresourceDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APISubresourceDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APISubresourceDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseKind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseKind == nil { + m.ResponseKind = &v1.GroupVersionKind{} + } + if err := m.ResponseKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedTypes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AcceptedTypes = append(m.AcceptedTypes, v1.GroupVersionKind{}) + if err := m.AcceptedTypes[len(m.AcceptedTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *APIVersionDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersionDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersionDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, APIResourceDiscovery{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Freshness", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Freshness = DiscoveryFreshness(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto new file mode 100644 index 000000000..e9ae88072 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto @@ -0,0 +1,156 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.apidiscovery.v2beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/apidiscovery/v2beta1"; + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. +// Versions are in descending order of preference, with the first version being the preferred entry. +message APIGroupDiscovery { + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // versions are the versions supported in this group. 
They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + repeated APIVersionDiscovery versions = 2; +} + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +message APIGroupDiscoveryList { + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of groups for discovery. The groups are listed in priority order. + repeated APIGroupDiscovery items = 2; +} + +// APIResourceDiscovery provides information about an API resource for discovery. +message APIResourceDiscovery { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/// + // Resources with empty groups are located at /api/v1/ + optional string resource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // scope indicates the scope of a resource, either Cluster or Namespaced + optional string scope = 3; + + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence. + // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + optional string singularResource = 4; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + repeated string verbs = 5; + + // shortNames is a list of suggested short names of the resource. + // +listType=set + repeated string shortNames = 6; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + repeated string categories = 7; + + // subresources is a list of subresources provided by this resource. Subresources are located at /apis////name-of-instance/ + // +listType=map + // +listMapKey=subresource + repeated APISubresourceDiscovery subresources = 8; +} + +// APISubresourceDiscovery provides information about an API subresource for discovery. 
+message APISubresourceDiscovery { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + optional string subresource = 1; + + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + + // acceptedTypes describes the kinds that this endpoint accepts. + // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions. + // +listType=set + repeated string verbs = 4; +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +message APIVersionDiscovery { + // version is the name of the version within a group version. + optional string version = 1; + + // resources is a list of APIResourceDiscovery objects for the corresponding group version. + // +listType=map + // +listMapKey=resource + repeated APIResourceDiscovery resources = 2; + + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + optional string freshness = 3; +} + diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/register.go b/vendor/k8s.io/api/apidiscovery/v2beta1/register.go new file mode 100644 index 000000000..f7cef1081 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. 
+const GroupName = "apidiscovery.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &APIGroupDiscoveryList{}, + &APIGroupDiscovery{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/types.go b/vendor/k8s.io/api/apidiscovery/v2beta1/types.go new file mode 100644 index 000000000..834293773 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/types.go @@ -0,0 +1,163 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.32 +// +k8s:prerelease-lifecycle-gen:removed=1.35 +// The deprecate and remove versions stated above are rough estimates and may be subject to change. We are estimating v2 types will be available in 1.28 and will support 4 versions where both v2beta1 and v2 are supported before deprecation. + +// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. +// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated +// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers) +// that a cluster supports. +type APIGroupDiscoveryList struct { + v1.TypeMeta `json:",inline"` + // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of groups for discovery. The groups are listed in priority order. 
+ Items []APIGroupDiscovery `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.32 +// +k8s:prerelease-lifecycle-gen:removed=1.35 +// The deprecate and remove versions stated above are rough estimates and may be subject to change. We are estimating v2 types will be available in 1.28 and will support 4 versions where both v2beta1 and v2 are supported before deprecation. + +// APIGroupDiscovery holds information about which resources are being served for all version of the API Group. +// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. +// Versions are in descending order of preference, with the first version being the preferred entry. +type APIGroupDiscovery struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // The only field completed will be name. For instance, resourceVersion will be empty. + // name is the name of the API group whose discovery information is presented here. + // name is allowed to be "" to represent the legacy, ungroupified resources. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // versions are the versions supported in this group. They are sorted in descending order of preference, + // with the preferred version being the first entry. + // +listType=map + // +listMapKey=version + Versions []APIVersionDiscovery `json:"versions,omitempty" protobuf:"bytes,2,rep,name=versions"` +} + +// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group. +type APIVersionDiscovery struct { + // version is the name of the version within a group version. + Version string `json:"version" protobuf:"bytes,1,opt,name=version"` + // resources is a list of APIResourceDiscovery objects for the corresponding group version. + // +listType=map + // +listMapKey=resource + Resources []APIResourceDiscovery `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` + // freshness marks whether a group version's discovery document is up to date. + // "Current" indicates the discovery document was recently + // refreshed. "Stale" indicates the discovery document could not + // be retrieved and the returned discovery document may be + // significantly out of date. Clients that require the latest + // version of the discovery information be retrieved before + // performing an operation should not use the aggregated document + Freshness DiscoveryFreshness `json:"freshness,omitempty" protobuf:"bytes,3,opt,name=freshness"` +} + +// APIResourceDiscovery provides information about an API resource for discovery. +type APIResourceDiscovery struct { + // resource is the plural name of the resource. This is used in the URL path and is the unique identifier + // for this resource across all versions in the API group. + // Resources with non-empty groups are located at /apis/// + // Resources with empty groups are located at /api/v1/ + Resource string `json:"resource" protobuf:"bytes,1,opt,name=resource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. 
+ // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // scope indicates the scope of a resource, either Cluster or Namespaced + Scope ResourceScope `json:"scope" protobuf:"bytes,3,opt,name=scope"` + // singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence. + // The command line tool kubectl, for example, allows use of the singular resource name in place of plurals. + // The singular form of a resource should always be an optional element - when in doubt use the canonical resource name. + SingularResource string `json:"singularResource" protobuf:"bytes,4,opt,name=singularResource"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). + // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,5,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + // +listType=set + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,6,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 'all'). + // Clients may use this to simplify acting on multiple resource types at once. + // +listType=set + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // subresources is a list of subresources provided by this resource. Subresources are located at /apis////name-of-instance/ + // +listType=map + // +listMapKey=subresource + Subresources []APISubresourceDiscovery `json:"subresources,omitempty" protobuf:"bytes,8,rep,name=subresources"` +} + +// ResourceScope is an enum defining the different scopes available to a resource. +type ResourceScope string + +const ( + ScopeCluster ResourceScope = "Cluster" + ScopeNamespace ResourceScope = "Namespaced" +) + +// DiscoveryFreshness is an enum defining whether the Discovery document published by an apiservice is up to date (fresh). +type DiscoveryFreshness string + +const ( + DiscoveryFreshnessCurrent DiscoveryFreshness = "Current" + DiscoveryFreshnessStale DiscoveryFreshness = "Stale" +) + +// APISubresourceDiscovery provides information about an API subresource for discovery. +type APISubresourceDiscovery struct { + // subresource is the name of the subresource. This is used in the URL path and is the unique identifier + // for this resource across all versions. + Subresource string `json:"subresource" protobuf:"bytes,1,opt,name=subresource"` + // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. + // Some subresources do not return normal resources, these will have null or empty return types. + ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` + // acceptedTypes describes the kinds that this endpoint accepts. 
+ // Subresources may accept the standard content types or define + // custom negotiation schemes. The list may not be exhaustive for + // all operations. + // +listType=map + // +listMapKey=group + // +listMapKey=version + // +listMapKey=kind + AcceptedTypes []v1.GroupVersionKind `json:"acceptedTypes,omitempty" protobuf:"bytes,3,rep,name=acceptedTypes"` + // verbs is a list of supported API operation types (this includes + // but is not limited to get, list, watch, create, update, patch, + // delete, deletecollection, and proxy). Subresources may define + // custom verbs outside the standard Kubernetes verb set. Clients + // should expect the behavior of standard verbs to align with + // Kubernetes interaction conventions. + // +listType=set + Verbs []string `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..cf8f98c6f --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.deepcopy.go @@ -0,0 +1,190 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscovery) DeepCopyInto(out *APIGroupDiscovery) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersionDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscovery. +func (in *APIGroupDiscovery) DeepCopy() *APIGroupDiscovery { + if in == nil { + return nil + } + out := new(APIGroupDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscovery) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupDiscoveryList) DeepCopyInto(out *APIGroupDiscoveryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIGroupDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscoveryList. 
+func (in *APIGroupDiscoveryList) DeepCopy() *APIGroupDiscoveryList { + if in == nil { + return nil + } + out := new(APIGroupDiscoveryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupDiscoveryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceDiscovery) DeepCopyInto(out *APIResourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Subresources != nil { + in, out := &in.Subresources, &out.Subresources + *out = make([]APISubresourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceDiscovery. +func (in *APIResourceDiscovery) DeepCopy() *APIResourceDiscovery { + if in == nil { + return nil + } + out := new(APIResourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APISubresourceDiscovery) DeepCopyInto(out *APISubresourceDiscovery) { + *out = *in + if in.ResponseKind != nil { + in, out := &in.ResponseKind, &out.ResponseKind + *out = new(v1.GroupVersionKind) + **out = **in + } + if in.AcceptedTypes != nil { + in, out := &in.AcceptedTypes, &out.AcceptedTypes + *out = make([]v1.GroupVersionKind, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISubresourceDiscovery. +func (in *APISubresourceDiscovery) DeepCopy() *APISubresourceDiscovery { + if in == nil { + return nil + } + out := new(APISubresourceDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIVersionDiscovery) DeepCopyInto(out *APIVersionDiscovery) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]APIResourceDiscovery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersionDiscovery. 
+func (in *APIVersionDiscovery) DeepCopy() *APIVersionDiscovery { + if in == nil { + return nil + } + out := new(APIVersionDiscovery) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..aa8f15fb4 --- /dev/null +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2beta1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *APIGroupDiscovery) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *APIGroupDiscovery) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *APIGroupDiscoveryList) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *APIGroupDiscoveryList) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go index ee12c7d0d..b0343ffcf 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +// source: k8s.io/api/apiserverinternal/v1alpha1/generated.proto package v1alpha1 @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} } func (*ServerStorageVersion) ProtoMessage() {} func (*ServerStorageVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{0} + return fileDescriptor_126bcbf538b54729, []int{0} } func (m *ServerStorageVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ServerStorageVersion proto.InternalMessageInfo func (m *StorageVersion) Reset() { *m = StorageVersion{} } func (*StorageVersion) ProtoMessage() {} func (*StorageVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{1} + return fileDescriptor_126bcbf538b54729, []int{1} } func (m *StorageVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_StorageVersion proto.InternalMessageInfo func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} } func (*StorageVersionCondition) ProtoMessage() {} func (*StorageVersionCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{2} + return fileDescriptor_126bcbf538b54729, []int{2} } func (m *StorageVersionCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_StorageVersionCondition proto.InternalMessageInfo func (m *StorageVersionList) Reset() { *m = StorageVersionList{} } func (*StorageVersionList) ProtoMessage() {} func (*StorageVersionList) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{3} + return fileDescriptor_126bcbf538b54729, []int{3} } func (m *StorageVersionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_StorageVersionList proto.InternalMessageInfo func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} } func (*StorageVersionSpec) ProtoMessage() {} func (*StorageVersionSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{4} + return fileDescriptor_126bcbf538b54729, []int{4} } func (m *StorageVersionSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_StorageVersionSpec proto.InternalMessageInfo func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} } func (*StorageVersionStatus) ProtoMessage() {} func (*StorageVersionStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_a3903ff5e3cc7a03, []int{5} + return fileDescriptor_126bcbf538b54729, []int{5} } func (m *StorageVersionStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -221,59 +221,60 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_a3903ff5e3cc7a03) 
+ proto.RegisterFile("k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_126bcbf538b54729) } -var fileDescriptor_a3903ff5e3cc7a03 = []byte{ - // 763 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x4f, 0x13, 0x41, - 0x14, 0xef, 0xd2, 0x52, 0x60, 0xaa, 0x54, 0x46, 0x08, 0xb5, 0x26, 0x5b, 0x6c, 0xa2, 0x41, 0x8d, - 0xbb, 0xd2, 0x88, 0x91, 0x98, 0x68, 0x58, 0x20, 0x06, 0x03, 0x62, 0x06, 0xe2, 0x01, 0x3d, 0x38, - 0xdd, 0x1d, 0xb7, 0x6b, 0xbb, 0x3b, 0x9b, 0x9d, 0x69, 0x13, 0x2e, 0xc6, 0x8f, 0xe0, 0x07, 0xf1, - 0xe8, 0x87, 0xe0, 0x64, 0xb8, 0x98, 0x90, 0x98, 0x34, 0xb2, 0x7e, 0x0b, 0x4e, 0x66, 0x66, 0x77, - 0x5b, 0xb6, 0x2d, 0xb1, 0xe1, 0xb0, 0xc9, 0xce, 0x7b, 0xef, 0xf7, 0x7b, 0x7f, 0xe6, 0x37, 0x0f, - 0xbc, 0x69, 0x3e, 0x63, 0x9a, 0x43, 0xf5, 0x66, 0xbb, 0x4e, 0x02, 0x8f, 0x70, 0xc2, 0xf4, 0x0e, - 0xf1, 0x2c, 0x1a, 0xe8, 0xb1, 0x03, 0xfb, 0x8e, 0xf8, 0x18, 0x09, 0x3a, 0x24, 0x70, 0x3c, 0x4e, - 0x02, 0x0f, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0xf9, 0x0d, 0xbc, 0xa2, 0xdb, 0xc4, 0x23, 0x01, 0xe6, - 0xc4, 0xd2, 0xfc, 0x80, 0x72, 0x0a, 0xef, 0x46, 0x30, 0x0d, 0xfb, 0x8e, 0x36, 0x04, 0xd3, 0x12, - 0x58, 0xf9, 0x91, 0xed, 0xf0, 0x46, 0xbb, 0xae, 0x99, 0xd4, 0xd5, 0x6d, 0x6a, 0x53, 0x5d, 0xa2, - 0xeb, 0xed, 0x4f, 0xf2, 0x24, 0x0f, 0xf2, 0x2f, 0x62, 0x2d, 0x3f, 0xe9, 0x17, 0xe3, 0x62, 0xb3, - 0xe1, 0x78, 0x24, 0x38, 0xd2, 0xfd, 0xa6, 0x2d, 0x2b, 0xd3, 0x5d, 0xc2, 0xb1, 0xde, 0x19, 0xaa, - 0xa5, 0xac, 0x5f, 0x86, 0x0a, 0xda, 0x1e, 0x77, 0x5c, 0x32, 0x04, 0x78, 0xfa, 0x3f, 0x00, 0x33, - 0x1b, 0xc4, 0xc5, 0x83, 0xb8, 0xea, 0x2f, 0x05, 0xcc, 0xef, 0xcb, 0x4e, 0xf7, 0x39, 0x0d, 0xb0, - 0x4d, 0xde, 0x91, 0x80, 0x39, 0xd4, 0x83, 0xab, 0xa0, 0x80, 0x7d, 0x27, 0x72, 0x6d, 0x6f, 0x96, - 0x94, 0x25, 0x65, 0x79, 0xc6, 0xb8, 0x79, 0xdc, 0xad, 0x64, 0xc2, 0x6e, 0xa5, 0xb0, 0xfe, 0x76, - 0x3b, 0x71, 0xa1, 0x8b, 0x71, 0x70, 0x1d, 0x14, 0x89, 0x67, 0x52, 0xcb, 0xf1, 0xec, 0x98, 0xa9, - 0x34, 0x21, 0xa1, 0x8b, 0x31, 0xb4, 0xb8, 0x95, 0x76, 0xa3, 0xc1, 0x78, 0xb8, 0x01, 0xe6, 0x2c, - 0x62, 0x52, 0x0b, 0xd7, 0x5b, 0x49, 0x35, 0xac, 0x94, 0x5d, 0xca, 0x2e, 0xcf, 0x18, 0x0b, 0x61, - 0xb7, 0x32, 0xb7, 0x39, 0xe8, 0x44, 0xc3, 0xf1, 0xd5, 0x1f, 0x13, 0x60, 0x76, 0xa0, 0xa3, 0x8f, - 0x60, 0x5a, 0x8c, 0xdb, 0xc2, 0x1c, 0xcb, 0x76, 0x0a, 0xb5, 0xc7, 0x5a, 0xff, 0xca, 0x7b, 0x53, - 0xd3, 0xfc, 0xa6, 0x2d, 0xef, 0x5f, 0x13, 0xd1, 0x5a, 0x67, 0x45, 0xdb, 0xab, 0x7f, 0x26, 0x26, - 0xdf, 0x25, 0x1c, 0x1b, 0x30, 0xee, 0x02, 0xf4, 0x6d, 0xa8, 0xc7, 0x0a, 0xdf, 0x83, 0x1c, 0xf3, - 0x89, 0x29, 0x3b, 0x2e, 0xd4, 0xd6, 0xb4, 0xb1, 0x04, 0xa5, 0xa5, 0xcb, 0xdc, 0xf7, 0x89, 0x69, - 0x5c, 0x8b, 0xd3, 0xe4, 0xc4, 0x09, 0x49, 0x52, 0x68, 0x82, 0x3c, 0xe3, 0x98, 0xb7, 0xc5, 0x2c, - 0x04, 0xfd, 0xf3, 0xab, 0xd1, 0x4b, 0x0a, 0x63, 0x36, 0x4e, 0x90, 0x8f, 0xce, 0x28, 0xa6, 0xae, - 0x7e, 0xcf, 0x82, 0xc5, 0x34, 0x60, 0x83, 0x7a, 0x96, 0xc3, 0xc5, 0xfc, 0x5e, 0x82, 0x1c, 0x3f, - 0xf2, 0x49, 0x2c, 0x85, 0x87, 0x49, 0x89, 0x07, 0x47, 0x3e, 0x39, 0xef, 0x56, 0x6e, 0x5f, 0x02, - 0x13, 0x6e, 0x24, 0x81, 0x70, 0xad, 0xd7, 0x41, 0x24, 0x89, 0x3b, 0xe9, 0x22, 0xce, 0xbb, 0x95, - 0x62, 0x0f, 0x96, 0xae, 0x0b, 0xbe, 0x06, 0x90, 0xd6, 0x65, 0x87, 0xd6, 0xab, 0x48, 0xc1, 0x42, - 0x59, 0x62, 0x10, 0x59, 0xa3, 0x1c, 0xd3, 0xc0, 0xbd, 0xa1, 0x08, 0x34, 0x02, 0x05, 0x3b, 0x00, - 0xb6, 0x30, 0xe3, 0x07, 0x01, 0xf6, 0x58, 0x54, 0xa2, 0xe3, 0x92, 0x52, 0x4e, 0x0e, 0xf5, 0xc1, - 0x78, 0x8a, 0x10, 0x88, 0x7e, 0xde, 0x9d, 0x21, 0x36, 0x34, 0x22, 0x03, 0xbc, 0x07, 0xf2, 0x01, - 0xc1, 0x8c, 
0x7a, 0xa5, 0x49, 0xd9, 0x7e, 0xef, 0x0e, 0x90, 0xb4, 0xa2, 0xd8, 0x0b, 0xef, 0x83, - 0x29, 0x97, 0x30, 0x86, 0x6d, 0x52, 0xca, 0xcb, 0xc0, 0x62, 0x1c, 0x38, 0xb5, 0x1b, 0x99, 0x51, - 0xe2, 0xaf, 0xfe, 0x54, 0x00, 0x4c, 0xcf, 0x7d, 0xc7, 0x61, 0x1c, 0x7e, 0x18, 0x52, 0xba, 0x36, - 0x5e, 0x5f, 0x02, 0x2d, 0x75, 0x7e, 0x23, 0x4e, 0x39, 0x9d, 0x58, 0x2e, 0xa8, 0xfc, 0x10, 0x4c, - 0x3a, 0x9c, 0xb8, 0xe2, 0x16, 0xb3, 0xcb, 0x85, 0xda, 0xea, 0x95, 0x74, 0x68, 0x5c, 0x8f, 0x33, - 0x4c, 0x6e, 0x0b, 0x2e, 0x14, 0x51, 0x56, 0xe7, 0x07, 0xfb, 0x11, 0x0f, 0xa0, 0xfa, 0x7b, 0x02, - 0xcc, 0x8f, 0x92, 0x31, 0xfc, 0x02, 0x8a, 0x2c, 0x65, 0x67, 0x25, 0x45, 0x16, 0x35, 0xf6, 0xe3, - 0x18, 0xb1, 0xfa, 0xfa, 0xab, 0x2a, 0x6d, 0x67, 0x68, 0x30, 0x19, 0xdc, 0x03, 0x0b, 0x26, 0x75, - 0x5d, 0xea, 0x6d, 0x8d, 0xdc, 0x79, 0xb7, 0xc2, 0x6e, 0x65, 0x61, 0x63, 0x54, 0x00, 0x1a, 0x8d, - 0x83, 0x01, 0x00, 0x66, 0xf2, 0x04, 0xa2, 0xa5, 0x57, 0xa8, 0xbd, 0xb8, 0xd2, 0x80, 0x7b, 0x2f, - 0xa9, 0xbf, 0xb3, 0x7a, 0x26, 0x86, 0x2e, 0x64, 0x31, 0xb4, 0xe3, 0x33, 0x35, 0x73, 0x72, 0xa6, - 0x66, 0x4e, 0xcf, 0xd4, 0xcc, 0xd7, 0x50, 0x55, 0x8e, 0x43, 0x55, 0x39, 0x09, 0x55, 0xe5, 0x34, - 0x54, 0x95, 0x3f, 0xa1, 0xaa, 0x7c, 0xfb, 0xab, 0x66, 0x0e, 0xa7, 0x93, 0x3c, 0xff, 0x02, 0x00, - 0x00, 0xff, 0xff, 0xa1, 0x5f, 0xcf, 0x37, 0x78, 0x07, 0x00, 0x00, +var fileDescriptor_126bcbf538b54729 = []byte{ + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0x13, 0x41, + 0x14, 0xee, 0xd2, 0x52, 0x60, 0xaa, 0xad, 0x8c, 0x10, 0x6a, 0x4d, 0xb6, 0xd8, 0x04, 0x83, 0x1a, + 0x77, 0xa5, 0x11, 0x23, 0x9a, 0x68, 0x58, 0x20, 0x06, 0x85, 0x60, 0xa6, 0xc4, 0x03, 0x7a, 0x70, + 0xba, 0x1d, 0xb7, 0x2b, 0xdd, 0x9d, 0xcd, 0xce, 0xb4, 0x09, 0x17, 0xe3, 0x4f, 0xd0, 0xff, 0xe1, + 0xd1, 0x1f, 0xc1, 0xc9, 0x70, 0x24, 0x31, 0x69, 0x64, 0xfd, 0x17, 0x9c, 0xcc, 0xcc, 0x6e, 0xb7, + 0x6c, 0x5b, 0x62, 0xc3, 0xa1, 0x49, 0xe7, 0xbd, 0xf7, 0x7d, 0xef, 0xcd, 0x37, 0xdf, 0xcc, 0x82, + 0xd5, 0xc3, 0xa7, 0x4c, 0xb3, 0xa9, 0x8e, 0x3d, 0x5b, 0xfc, 0x18, 0xf1, 0x3b, 0xc4, 0xb7, 0x5d, + 0x4e, 0x7c, 0x17, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0x79, 0x4d, 0xbc, 0xa2, 0x5b, 0xc4, 0x25, 0x3e, + 0xe6, 0xa4, 0xa1, 0x79, 0x3e, 0xe5, 0x14, 0x2e, 0x85, 0x30, 0x0d, 0x7b, 0xb6, 0x36, 0x04, 0xd3, + 0x7a, 0xb0, 0xd2, 0x43, 0xcb, 0xe6, 0xcd, 0x76, 0x5d, 0x33, 0xa9, 0xa3, 0x5b, 0xd4, 0xa2, 0xba, + 0x44, 0xd7, 0xdb, 0x9f, 0xe4, 0x4a, 0x2e, 0xe4, 0xbf, 0x90, 0xb5, 0xf4, 0xb8, 0x3f, 0x8c, 0x83, + 0xcd, 0xa6, 0xed, 0x12, 0xff, 0x48, 0xf7, 0x0e, 0x2d, 0x39, 0x99, 0xee, 0x10, 0x8e, 0xf5, 0xce, + 0xd0, 0x2c, 0x25, 0xfd, 0x32, 0x94, 0xdf, 0x76, 0xb9, 0xed, 0x90, 0x21, 0xc0, 0x93, 0xff, 0x01, + 0x98, 0xd9, 0x24, 0x0e, 0x1e, 0xc4, 0x55, 0xbe, 0x4f, 0x80, 0xb9, 0x9a, 0xdc, 0x69, 0x8d, 0x53, + 0x1f, 0x5b, 0xe4, 0x1d, 0xf1, 0x99, 0x4d, 0x5d, 0xb8, 0x0a, 0x72, 0xd8, 0xb3, 0xc3, 0xd4, 0xf6, + 0x66, 0x51, 0x59, 0x54, 0x96, 0x67, 0x8c, 0x9b, 0xc7, 0xdd, 0x72, 0x2a, 0xe8, 0x96, 0x73, 0xeb, + 0x6f, 0xb7, 0x7b, 0x29, 0x74, 0xb1, 0x0e, 0xae, 0x83, 0x02, 0x71, 0x4d, 0xda, 0xb0, 0x5d, 0x2b, + 0x62, 0x2a, 0x4e, 0x48, 0xe8, 0x42, 0x04, 0x2d, 0x6c, 0x25, 0xd3, 0x68, 0xb0, 0x1e, 0x6e, 0x80, + 0xd9, 0x06, 0x31, 0x69, 0x03, 0xd7, 0x5b, 0xbd, 0x69, 0x58, 0x31, 0xbd, 0x98, 0x5e, 0x9e, 0x31, + 0xe6, 0x83, 0x6e, 0x79, 0x76, 0x73, 0x30, 0x89, 0x86, 0xeb, 0xe1, 0x33, 0x90, 0x97, 0x07, 0xd8, + 0x88, 0x19, 0x32, 0x92, 0x01, 0x06, 0xdd, 0x72, 0xbe, 0x96, 0xc8, 0xa0, 0x81, 0xca, 0xca, 0xcf, + 0x09, 0x90, 0x1f, 0x50, 0xe3, 0x23, 0x98, 0x16, 0x47, 0xd5, 0xc0, 0x1c, 
0x4b, 0x29, 0x72, 0xd5, + 0x47, 0x5a, 0xdf, 0x2e, 0xb1, 0xe2, 0x9a, 0x77, 0x68, 0x49, 0xef, 0x68, 0xa2, 0x5a, 0xeb, 0xac, + 0x68, 0x7b, 0xf5, 0xcf, 0xc4, 0xe4, 0xbb, 0x84, 0x63, 0x03, 0x46, 0x0a, 0x80, 0x7e, 0x0c, 0xc5, + 0xac, 0xf0, 0x3d, 0xc8, 0x30, 0x8f, 0x98, 0x52, 0xad, 0x5c, 0x75, 0x4d, 0x1b, 0xcb, 0x8c, 0x5a, + 0x72, 0xcc, 0x9a, 0x47, 0x4c, 0xe3, 0x5a, 0xd4, 0x26, 0x23, 0x56, 0x48, 0x92, 0x42, 0x13, 0x64, + 0x19, 0xc7, 0xbc, 0x2d, 0x74, 0x14, 0xf4, 0xcf, 0xaf, 0x46, 0x2f, 0x29, 0x8c, 0x7c, 0xd4, 0x20, + 0x1b, 0xae, 0x51, 0x44, 0x5d, 0xf9, 0x91, 0x06, 0x0b, 0x49, 0xc0, 0x06, 0x75, 0x1b, 0x36, 0x17, + 0xfa, 0xbd, 0x04, 0x19, 0x7e, 0xe4, 0x91, 0xc8, 0x46, 0x0f, 0x7a, 0x23, 0xee, 0x1f, 0x79, 0xe4, + 0xbc, 0x5b, 0xbe, 0x7d, 0x09, 0x4c, 0xa4, 0x91, 0x04, 0xc2, 0xb5, 0x78, 0x07, 0xa1, 0x9d, 0xee, + 0x24, 0x87, 0x38, 0xef, 0x96, 0x0b, 0x31, 0x2c, 0x39, 0x17, 0x7c, 0x0d, 0x20, 0xad, 0x87, 0x47, + 0xfc, 0x2a, 0x74, 0xbf, 0x70, 0xa5, 0x10, 0x22, 0x6d, 0x94, 0x22, 0x1a, 0xb8, 0x37, 0x54, 0x81, + 0x46, 0xa0, 0x60, 0x07, 0xc0, 0x16, 0x66, 0x7c, 0xdf, 0xc7, 0x2e, 0x0b, 0x47, 0xb4, 0x1d, 0x52, + 0xcc, 0x48, 0x51, 0xef, 0x8f, 0xe7, 0x08, 0x81, 0xe8, 0xf7, 0xdd, 0x19, 0x62, 0x43, 0x23, 0x3a, + 0xc0, 0xbb, 0x20, 0xeb, 0x13, 0xcc, 0xa8, 0x5b, 0x9c, 0x94, 0xdb, 0x8f, 0xcf, 0x00, 0xc9, 0x28, + 0x8a, 0xb2, 0xf0, 0x1e, 0x98, 0x72, 0x08, 0x63, 0xd8, 0x22, 0xc5, 0xac, 0x2c, 0x2c, 0x44, 0x85, + 0x53, 0xbb, 0x61, 0x18, 0xf5, 0xf2, 0x95, 0x5f, 0x0a, 0x80, 0x49, 0xdd, 0x77, 0x6c, 0xc6, 0xe1, + 0x87, 0x21, 0xa7, 0x6b, 0xe3, 0xed, 0x4b, 0xa0, 0xa5, 0xcf, 0x6f, 0x44, 0x2d, 0xa7, 0x7b, 0x91, + 0x0b, 0x2e, 0x3f, 0x00, 0x93, 0x36, 0x27, 0x8e, 0x38, 0xc5, 0xf4, 0x72, 0xae, 0xba, 0x7a, 0x25, + 0x1f, 0x1a, 0xd7, 0xa3, 0x0e, 0x93, 0xdb, 0x82, 0x0b, 0x85, 0x94, 0x95, 0xb9, 0xc1, 0xfd, 0x88, + 0x0b, 0x50, 0xf9, 0x2d, 0x1e, 0xb8, 0x11, 0x36, 0x86, 0x5f, 0x40, 0x81, 0x25, 0xe2, 0xac, 0xa8, + 0xc8, 0xa1, 0xc6, 0xbe, 0x1c, 0x23, 0x9e, 0xcd, 0xfe, 0x33, 0x97, 0x8c, 0x33, 0x34, 0xd8, 0x0c, + 0xee, 0x81, 0x79, 0x93, 0x3a, 0x0e, 0x75, 0xb7, 0x46, 0xbe, 0x97, 0xb7, 0x82, 0x6e, 0x79, 0x7e, + 0x63, 0x54, 0x01, 0x1a, 0x8d, 0x83, 0x3e, 0x00, 0x66, 0xef, 0x0a, 0x84, 0x0f, 0x66, 0xae, 0xfa, + 0xe2, 0x4a, 0x02, 0xc7, 0x37, 0xa9, 0xff, 0x66, 0xc5, 0x21, 0x86, 0x2e, 0x74, 0x31, 0xde, 0x1c, + 0x9f, 0xa9, 0xa9, 0x93, 0x33, 0x35, 0x75, 0x7a, 0xa6, 0xa6, 0xbe, 0x06, 0xaa, 0x72, 0x1c, 0xa8, + 0xca, 0x49, 0xa0, 0x2a, 0xa7, 0x81, 0xaa, 0xfc, 0x09, 0x54, 0xe5, 0xdb, 0x5f, 0x35, 0x75, 0xb0, + 0x34, 0xd6, 0x07, 0xf9, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x04, 0x7d, 0x78, 0xb8, 0x07, + 0x00, 0x00, } func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) { @@ -296,6 +297,15 @@ func (m *ServerStorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ServedVersions) > 0 { + for iNdEx := len(m.ServedVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ServedVersions[iNdEx]) + copy(dAtA[i:], m.ServedVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServedVersions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } if len(m.DecodableVersions) > 0 { for iNdEx := len(m.DecodableVersions) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.DecodableVersions[iNdEx]) @@ -582,6 +592,12 @@ func (m *ServerStorageVersion) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.ServedVersions) > 0 { + for _, s := range m.ServedVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -685,6 +701,7 @@ func (this *ServerStorageVersion) String() string { `APIServerID:` + 
fmt.Sprintf("%v", this.APIServerID) + `,`, `EncodingVersion:` + fmt.Sprintf("%v", this.EncodingVersion) + `,`, `DecodableVersions:` + fmt.Sprintf("%v", this.DecodableVersions) + `,`, + `ServedVersions:` + fmt.Sprintf("%v", this.ServedVersions) + `,`, `}`, }, "") return s @@ -896,6 +913,38 @@ func (m *ServerStorageVersion) Unmarshal(dAtA []byte) error { } m.DecodableVersions = append(m.DecodableVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServedVersions = append(m.ServedVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto index d986a87d9..8a7786072 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -26,7 +26,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "k8s.io/api/apiserverinternal/v1alpha1"; // An API server instance reports the version it can decode and the version it // encodes objects to when persisting objects in the backend. @@ -42,12 +42,17 @@ message ServerStorageVersion { // The encodingVersion must be included in the decodableVersions. // +listType=set repeated string decodableVersions = 3; + + // The API server can serve these versions. + // DecodableVersions must include all ServedVersions. + // +listType=set + repeated string servedVersions = 4; } // Storage version of a specific resource. message StorageVersion { // The name is .. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is an empty spec. It is here to comply with Kubernetes API style. optional StorageVersionSpec spec = 2; @@ -72,8 +77,7 @@ message StorageVersionCondition { optional int64 observedGeneration = 3; // Last time the condition transitioned from one status to another. - // +required - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // The reason for the condition's last transition. // +required @@ -89,7 +93,7 @@ message StorageVersionList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items holds a list of StorageVersion repeated StorageVersion items = 2; diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go index a0437b507..31a419abf 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go @@ -77,6 +77,11 @@ type ServerStorageVersion struct { // The encodingVersion must be included in the decodableVersions. // +listType=set DecodableVersions []string `json:"decodableVersions,omitempty" protobuf:"bytes,3,opt,name=decodableVersions"` + + // The API server can serve these versions. + // DecodableVersions must include all ServedVersions. + // +listType=set + ServedVersions []string `json:"servedVersions,omitempty" protobuf:"bytes,4,opt,name=servedVersions"` } type StorageVersionConditionType string @@ -106,7 +111,6 @@ type StorageVersionCondition struct { // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` // Last time the condition transitioned from one status to another. - // +required LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` // The reason for the condition's last transition. // +required diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go index 6de934200..6fd1c3ebe 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ServerStorageVersion = map[string]string{ @@ -32,6 +32,7 @@ var map_ServerStorageVersion = map[string]string{ "apiServerID": "The ID of the reporting API server.", "encodingVersion": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).", "decodableVersions": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.", + "servedVersions": "The API server can serve these versions. 
DecodableVersions must include all ServedVersions.", } func (ServerStorageVersion) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go index 44dffa751..638d80140 100644 --- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go @@ -33,6 +33,11 @@ func (in *ServerStorageVersion) DeepCopyInto(out *ServerStorageVersion) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.ServedVersions != nil { + in, out := &in.ServedVersions, &out.ServedVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 61dc97bde..d189e860f 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 81d51bd58..ea62a099f 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto +// source: k8s.io/api/apps/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{0} + return fileDescriptor_5b781835628d5338, []int{0} } func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } func (*ControllerRevisionList) ProtoMessage() {} func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{1} + return fileDescriptor_5b781835628d5338, []int{1} } func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{2} + return fileDescriptor_5b781835628d5338, []int{2} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{3} + return fileDescriptor_5b781835628d5338, []int{3} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func 
(*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{4} + return fileDescriptor_5b781835628d5338, []int{4} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{5} + return fileDescriptor_5b781835628d5338, []int{5} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{6} + return fileDescriptor_5b781835628d5338, []int{6} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{7} + return fileDescriptor_5b781835628d5338, []int{7} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{8} + return fileDescriptor_5b781835628d5338, []int{8} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{9} + return fileDescriptor_5b781835628d5338, []int{9} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{10} + return fileDescriptor_5b781835628d5338, []int{10} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{11} + return fileDescriptor_5b781835628d5338, []int{11} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{12} + return 
fileDescriptor_5b781835628d5338, []int{12} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{13} + return fileDescriptor_5b781835628d5338, []int{13} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{14} + return fileDescriptor_5b781835628d5338, []int{14} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{15} + return fileDescriptor_5b781835628d5338, []int{15} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{16} + return fileDescriptor_5b781835628d5338, []int{16} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +527,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{17} + return fileDescriptor_5b781835628d5338, []int{17} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +555,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{18} + return fileDescriptor_5b781835628d5338, []int{18} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +583,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{19} + return fileDescriptor_5b781835628d5338, []int{19} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +611,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{20} + return fileDescriptor_5b781835628d5338, []int{20} } func (m 
*RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +639,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{21} + return fileDescriptor_5b781835628d5338, []int{21} } func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +667,7 @@ var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{22} + return fileDescriptor_5b781835628d5338, []int{22} } func (m *StatefulSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +695,7 @@ var xxx_messageInfo_StatefulSet proto.InternalMessageInfo func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } func (*StatefulSetCondition) ProtoMessage() {} func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{23} + return fileDescriptor_5b781835628d5338, []int{23} } func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +723,7 @@ var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{24} + return fileDescriptor_5b781835628d5338, []int{24} } func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -748,12 +748,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } +func (*StatefulSetOrdinals) ProtoMessage() {} +func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { + return fileDescriptor_5b781835628d5338, []int{25} +} +func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetOrdinals.Merge(m, src) +} +func (m *StatefulSetOrdinals) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetOrdinals) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{25} + return fileDescriptor_5b781835628d5338, []int{26} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -781,7 +809,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m 
*StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{26} + return fileDescriptor_5b781835628d5338, []int{27} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -809,7 +837,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{27} + return fileDescriptor_5b781835628d5338, []int{28} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -837,7 +865,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{28} + return fileDescriptor_5b781835628d5338, []int{29} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -888,6 +916,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1.StatefulSetOrdinals") proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") @@ -895,146 +924,149 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b) -} - -var fileDescriptor_e1014cab6f31e43b = []byte{ - // 2149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1b, 0xc7, - 0x19, 0xd7, 0xf2, 0x21, 0x51, 0x23, 0x4b, 0xb2, 0x47, 0xaa, 0xc4, 0xd8, 0x0d, 0xe9, 0xb2, 0xae, - 0xa3, 0xc4, 0x31, 0x59, 0x3b, 0x4e, 0x50, 0xc4, 0x45, 0x02, 0x91, 0x4a, 0xd3, 0x34, 0x7a, 0x75, - 0x64, 0x39, 0x80, 0x9b, 0x16, 0x1d, 0x2d, 0xc7, 0xd4, 0x46, 0xfb, 0xc2, 0xee, 0x2c, 0x63, 0xa2, - 0x97, 0xa2, 0x40, 0x6f, 0x3d, 0xf4, 0x3f, 0x29, 0x8a, 0xa2, 0xb9, 0x15, 0x41, 0xd0, 0x8b, 0x2f, - 0x45, 0x83, 0x5e, 0x9a, 0x13, 0x51, 0x33, 0xa7, 0xa2, 0xe8, 0xad, 0xbd, 0xf8, 0xd2, 0x62, 0x66, - 0x67, 0xdf, 0xb3, 0x22, 0x25, 0x27, 0xca, 0x03, 0xbe, 0x89, 0x33, 0xbf, 0xef, 0x37, 0xdf, 0xcc, - 0x7c, 0xdf, 0x7c, 0xbf, 0x99, 0x15, 0xb8, 0x7d, 0xf4, 0x3d, 0xb7, 0xa9, 0x59, 0xad, 0x23, 0xef, - 0x80, 0x38, 0x26, 0xa1, 0xc4, 0x6d, 0xf5, 0x89, 0xd9, 0xb5, 0x9c, 0x96, 0xe8, 0xc0, 0xb6, 0xd6, - 0xc2, 0xb6, 0xed, 0xb6, 0xfa, 0x37, 0x5a, 0x3d, 0x62, 0x12, 0x07, 0x53, 0xd2, 0x6d, 0xda, 0x8e, - 0x45, 0x2d, 0x08, 0x7d, 0x4c, 0x13, 0xdb, 0x5a, 0x93, 0x61, 0x9a, 0xfd, 0x1b, 0x17, 0xaf, 0xf7, - 0x34, 0x7a, 0xe8, 0x1d, 0x34, 0x55, 0xcb, 0x68, 0xf5, 0xac, 0x9e, 0xd5, 0xe2, 0xd0, 0x03, 0xef, - 0x3e, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x7c, 0x8a, 0x8b, 0x8d, 0xd8, 0x30, 0xaa, 0xe5, 0x10, 0xc9, - 0x30, 0x17, 0x6f, 0x45, 0x18, 0x03, 0xab, 0x87, 0x9a, 0x49, 0x9c, 
0x41, 0xcb, 0x3e, 0xea, 0xb1, - 0x06, 0xb7, 0x65, 0x10, 0x8a, 0x65, 0x56, 0xad, 0x3c, 0x2b, 0xc7, 0x33, 0xa9, 0x66, 0x90, 0x8c, - 0xc1, 0x2b, 0xe3, 0x0c, 0x5c, 0xf5, 0x90, 0x18, 0x38, 0x63, 0xf7, 0x52, 0x9e, 0x9d, 0x47, 0x35, - 0xbd, 0xa5, 0x99, 0xd4, 0xa5, 0x4e, 0xda, 0xa8, 0xf1, 0x5f, 0x05, 0xc0, 0x8e, 0x65, 0x52, 0xc7, - 0xd2, 0x75, 0xe2, 0x20, 0xd2, 0xd7, 0x5c, 0xcd, 0x32, 0xe1, 0xcf, 0x41, 0x85, 0xcd, 0xa7, 0x8b, - 0x29, 0xae, 0x2a, 0x97, 0x95, 0xb5, 0xb9, 0x9b, 0xdf, 0x6d, 0x46, 0x8b, 0x1c, 0xd2, 0x37, 0xed, - 0xa3, 0x1e, 0x6b, 0x70, 0x9b, 0x0c, 0xdd, 0xec, 0xdf, 0x68, 0xee, 0x1c, 0xbc, 0x47, 0x54, 0xba, - 0x45, 0x28, 0x6e, 0xc3, 0x87, 0xc3, 0xfa, 0xd4, 0x68, 0x58, 0x07, 0x51, 0x1b, 0x0a, 0x59, 0xe1, - 0x0e, 0x28, 0x71, 0xf6, 0x02, 0x67, 0xbf, 0x9e, 0xcb, 0x2e, 0x26, 0xdd, 0x44, 0xf8, 0xfd, 0x37, - 0x1e, 0x50, 0x62, 0x32, 0xf7, 0xda, 0xe7, 0x04, 0x75, 0x69, 0x03, 0x53, 0x8c, 0x38, 0x11, 0x7c, - 0x11, 0x54, 0x1c, 0xe1, 0x7e, 0xb5, 0x78, 0x59, 0x59, 0x2b, 0xb6, 0xcf, 0x0b, 0x54, 0x25, 0x98, - 0x16, 0x0a, 0x11, 0x8d, 0x3f, 0x2b, 0x60, 0x25, 0x3b, 0xef, 0x4d, 0xcd, 0xa5, 0xf0, 0xdd, 0xcc, - 0xdc, 0x9b, 0x93, 0xcd, 0x9d, 0x59, 0xf3, 0x99, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0x79, 0xbf, 0x0d, - 0xca, 0x1a, 0x25, 0x86, 0x5b, 0x2d, 0x5c, 0x2e, 0xae, 0xcd, 0xdd, 0xbc, 0xda, 0xcc, 0xc6, 0x6e, - 0x33, 0xeb, 0x58, 0x7b, 0x5e, 0x50, 0x96, 0xdf, 0x62, 0xc6, 0xc8, 0xe7, 0x68, 0xfc, 0x4f, 0x01, - 0xb3, 0x1b, 0x98, 0x18, 0x96, 0xb9, 0x47, 0xe8, 0x19, 0x6c, 0x5a, 0x07, 0x94, 0x5c, 0x9b, 0xa8, - 0x62, 0xd3, 0xbe, 0x25, 0xf3, 0x3d, 0x74, 0x67, 0xcf, 0x26, 0x6a, 0xb4, 0x51, 0xec, 0x17, 0xe2, - 0xc6, 0xf0, 0x6d, 0x30, 0xed, 0x52, 0x4c, 0x3d, 0x97, 0x6f, 0xd3, 0xdc, 0xcd, 0x6f, 0x1f, 0x4f, - 0xc3, 0xa1, 0xed, 0x05, 0x41, 0x34, 0xed, 0xff, 0x46, 0x82, 0xa2, 0xf1, 0xcf, 0x02, 0x80, 0x21, - 0xb6, 0x63, 0x99, 0x5d, 0x8d, 0xb2, 0xf8, 0x7d, 0x15, 0x94, 0xe8, 0xc0, 0x26, 0x7c, 0x19, 0x66, - 0xdb, 0x57, 0x03, 0x2f, 0xee, 0x0c, 0x6c, 0xf2, 0x78, 0x58, 0x5f, 0xc9, 0x5a, 0xb0, 0x1e, 0xc4, - 0x6d, 0xe0, 0x66, 0xe8, 0x5f, 0x81, 0x5b, 0xdf, 0x4a, 0x0e, 0xfd, 0x78, 0x58, 0x97, 0x1c, 0x16, - 0xcd, 0x90, 0x29, 0xe9, 0x20, 0xec, 0x03, 0xa8, 0x63, 0x97, 0xde, 0x71, 0xb0, 0xe9, 0xfa, 0x23, - 0x69, 0x06, 0x11, 0x33, 0x7f, 0x61, 0xb2, 0xed, 0x61, 0x16, 0xed, 0x8b, 0xc2, 0x0b, 0xb8, 0x99, - 0x61, 0x43, 0x92, 0x11, 0xe0, 0x55, 0x30, 0xed, 0x10, 0xec, 0x5a, 0x66, 0xb5, 0xc4, 0x67, 0x11, - 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0xe7, 0xc1, 0x8c, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xb5, - 0xcc, 0x81, 0x8b, 0x02, 0x38, 0xb3, 0xe5, 0x37, 0xa3, 0xa0, 0xbf, 0xf1, 0x7b, 0x05, 0xcc, 0x87, - 0x2b, 0x77, 0x06, 0xa9, 0xd2, 0x4e, 0xa6, 0xca, 0xb3, 0xc7, 0xc6, 0x49, 0x4e, 0x86, 0x7c, 0x58, - 0x8c, 0xf9, 0xcc, 0x82, 0x10, 0xfe, 0x14, 0x54, 0x5c, 0xa2, 0x13, 0x95, 0x5a, 0x8e, 0xf0, 0xf9, - 0xa5, 0x09, 0x7d, 0xc6, 0x07, 0x44, 0xdf, 0x13, 0xa6, 0xed, 0x73, 0xcc, 0xe9, 0xe0, 0x17, 0x0a, - 0x29, 0xe1, 0x8f, 0x41, 0x85, 0x12, 0xc3, 0xd6, 0x31, 0x25, 0x22, 0x4d, 0x12, 0xf1, 0xcd, 0xc2, - 0x85, 0x91, 0xed, 0x5a, 0xdd, 0x3b, 0x02, 0xc6, 0x13, 0x25, 0x5c, 0x87, 0xa0, 0x15, 0x85, 0x34, - 0xf0, 0x08, 0x2c, 0x78, 0x76, 0x97, 0x21, 0x29, 0x3b, 0xba, 0x7b, 0x03, 0x11, 0x3e, 0xd7, 0x8e, - 0x5d, 0x90, 0xfd, 0x84, 0x49, 0x7b, 0x45, 0x0c, 0xb0, 0x90, 0x6c, 0x47, 0x29, 0x6a, 0xb8, 0x0e, - 0x16, 0x0d, 0xcd, 0x44, 0x04, 0x77, 0x07, 0x7b, 0x44, 0xb5, 0xcc, 0xae, 0xcb, 0x03, 0xa8, 0xdc, - 0x5e, 0x15, 0x04, 0x8b, 0x5b, 0xc9, 0x6e, 0x94, 0xc6, 0xc3, 0x4d, 0xb0, 0x1c, 0x9c, 0xb3, 0x3f, - 0xd4, 0x5c, 0x6a, 0x39, 0x83, 0x4d, 0xcd, 0xd0, 0x68, 0x75, 0x9a, 0xf3, 0x54, 0x47, 0xc3, 
0xfa, - 0x32, 0x92, 0xf4, 0x23, 0xa9, 0x55, 0xe3, 0x37, 0xd3, 0x60, 0x31, 0x75, 0x1a, 0xc0, 0xbb, 0x60, - 0x45, 0xf5, 0x1c, 0x87, 0x98, 0x74, 0xdb, 0x33, 0x0e, 0x88, 0xb3, 0xa7, 0x1e, 0x92, 0xae, 0xa7, - 0x93, 0x2e, 0xdf, 0xd1, 0x72, 0xbb, 0x26, 0x7c, 0x5d, 0xe9, 0x48, 0x51, 0x28, 0xc7, 0x1a, 0xfe, - 0x08, 0x40, 0x93, 0x37, 0x6d, 0x69, 0xae, 0x1b, 0x72, 0x16, 0x38, 0x67, 0x98, 0x80, 0xdb, 0x19, - 0x04, 0x92, 0x58, 0x31, 0x1f, 0xbb, 0xc4, 0xd5, 0x1c, 0xd2, 0x4d, 0xfb, 0x58, 0x4c, 0xfa, 0xb8, - 0x21, 0x45, 0xa1, 0x1c, 0x6b, 0xf8, 0x32, 0x98, 0xf3, 0x47, 0xe3, 0x6b, 0x2e, 0x36, 0x67, 0x49, - 0x90, 0xcd, 0x6d, 0x47, 0x5d, 0x28, 0x8e, 0x63, 0x53, 0xb3, 0x0e, 0x5c, 0xe2, 0xf4, 0x49, 0xf7, - 0x4d, 0x5f, 0x03, 0xb0, 0x42, 0x59, 0xe6, 0x85, 0x32, 0x9c, 0xda, 0x4e, 0x06, 0x81, 0x24, 0x56, - 0x6c, 0x6a, 0x7e, 0xd4, 0x64, 0xa6, 0x36, 0x9d, 0x9c, 0xda, 0xbe, 0x14, 0x85, 0x72, 0xac, 0x59, - 0xec, 0xf9, 0x2e, 0xaf, 0xf7, 0xb1, 0xa6, 0xe3, 0x03, 0x9d, 0x54, 0x67, 0x92, 0xb1, 0xb7, 0x9d, - 0xec, 0x46, 0x69, 0x3c, 0x7c, 0x13, 0x5c, 0xf0, 0x9b, 0xf6, 0x4d, 0x1c, 0x92, 0x54, 0x38, 0xc9, - 0x33, 0x82, 0xe4, 0xc2, 0x76, 0x1a, 0x80, 0xb2, 0x36, 0xf0, 0x55, 0xb0, 0xa0, 0x5a, 0xba, 0xce, - 0xe3, 0xb1, 0x63, 0x79, 0x26, 0xad, 0xce, 0x72, 0x16, 0xc8, 0x72, 0xa8, 0x93, 0xe8, 0x41, 0x29, - 0x24, 0xbc, 0x07, 0x80, 0x1a, 0x94, 0x03, 0xb7, 0x0a, 0xf2, 0x0b, 0x7d, 0xb6, 0x0e, 0x45, 0x05, - 0x38, 0x6c, 0x72, 0x51, 0x8c, 0xad, 0xf1, 0xa1, 0x02, 0x56, 0x73, 0x72, 0x1c, 0xbe, 0x9e, 0xa8, - 0x7a, 0xd7, 0x52, 0x55, 0xef, 0x52, 0x8e, 0x59, 0xac, 0xf4, 0xa9, 0x60, 0x9e, 0xe9, 0x0e, 0xcd, - 0xec, 0xf9, 0x10, 0x71, 0x82, 0xbd, 0x20, 0xf3, 0x1d, 0xc5, 0x81, 0xd1, 0x31, 0x7c, 0x61, 0x34, - 0xac, 0xcf, 0x27, 0xfa, 0x50, 0x92, 0xb3, 0xf1, 0xab, 0x02, 0x00, 0x1b, 0xc4, 0xd6, 0xad, 0x81, - 0x41, 0xcc, 0xb3, 0x50, 0x2d, 0x1b, 0x09, 0xd5, 0xd2, 0x90, 0x6e, 0x44, 0xe8, 0x4f, 0xae, 0x6c, - 0xd9, 0x4c, 0xc9, 0x96, 0x2b, 0x63, 0x78, 0x8e, 0xd7, 0x2d, 0x7f, 0x2f, 0x82, 0xa5, 0x08, 0x1c, - 0x09, 0x97, 0xdb, 0x89, 0x2d, 0x7c, 0x2e, 0xb5, 0x85, 0xab, 0x12, 0x93, 0xcf, 0x4d, 0xb9, 0xbc, - 0x07, 0x16, 0x98, 0xae, 0xf0, 0x77, 0x8d, 0xab, 0x96, 0xe9, 0x13, 0xab, 0x96, 0xb0, 0xea, 0x6c, - 0x26, 0x98, 0x50, 0x8a, 0x39, 0x47, 0x25, 0xcd, 0x7c, 0x15, 0x55, 0xd2, 0x1f, 0x14, 0xb0, 0x10, - 0x6d, 0xd3, 0x19, 0xc8, 0xa4, 0x4e, 0x52, 0x26, 0xd5, 0x8e, 0x8f, 0xcb, 0x1c, 0x9d, 0xf4, 0xb7, - 0x52, 0xdc, 0x6b, 0x2e, 0x94, 0xd6, 0xd8, 0x85, 0xca, 0xd6, 0x35, 0x15, 0xbb, 0xa2, 0xac, 0x9e, - 0xf3, 0x2f, 0x53, 0x7e, 0x1b, 0x0a, 0x7b, 0x13, 0x92, 0xaa, 0xf0, 0xf9, 0x4a, 0xaa, 0xe2, 0x67, - 0x23, 0xa9, 0xee, 0x80, 0x8a, 0x1b, 0x88, 0xa9, 0x12, 0xa7, 0xbc, 0x3a, 0x2e, 0x9d, 0x85, 0x8e, - 0x0a, 0x59, 0x43, 0x05, 0x15, 0x32, 0xc9, 0xb4, 0x53, 0xf9, 0x8b, 0xd4, 0x4e, 0x2c, 0xbc, 0x6d, - 0xec, 0xb9, 0xa4, 0xcb, 0x53, 0xa9, 0x12, 0x85, 0xf7, 0x2e, 0x6f, 0x45, 0xa2, 0x17, 0xee, 0x83, - 0x55, 0xdb, 0xb1, 0x7a, 0x0e, 0x71, 0xdd, 0x0d, 0x82, 0xbb, 0xba, 0x66, 0x92, 0x60, 0x02, 0x7e, - 0xd5, 0xbb, 0x34, 0x1a, 0xd6, 0x57, 0x77, 0xe5, 0x10, 0x94, 0x67, 0xdb, 0xf8, 0x53, 0x09, 0x9c, - 0x4f, 0x9f, 0x88, 0x39, 0x42, 0x44, 0x39, 0x95, 0x10, 0x79, 0x31, 0x16, 0xa2, 0xbe, 0x4a, 0x8b, - 0xdd, 0xf9, 0x33, 0x61, 0xba, 0x0e, 0x16, 0x85, 0xf0, 0x08, 0x3a, 0x85, 0x14, 0x0b, 0xb7, 0x67, - 0x3f, 0xd9, 0x8d, 0xd2, 0x78, 0x78, 0x1b, 0xcc, 0x3b, 0x5c, 0x5b, 0x05, 0x04, 0xbe, 0x3e, 0xf9, - 0x86, 0x20, 0x98, 0x47, 0xf1, 0x4e, 0x94, 0xc4, 0x32, 0x6d, 0x12, 0x49, 0x8e, 0x80, 0xa0, 0x94, - 0xd4, 0x26, 0xeb, 0x69, 0x00, 0xca, 0xda, 0xc0, 0x2d, 0xb0, 0xe4, 0x99, 0x59, 0x2a, 0x3f, 0xd6, - 0x2e, 0x09, 0xaa, 
0xa5, 0xfd, 0x2c, 0x04, 0xc9, 0xec, 0xe0, 0x4f, 0x12, 0x72, 0x65, 0x9a, 0x9f, - 0x22, 0xcf, 0x1d, 0x9f, 0x0e, 0x13, 0xeb, 0x15, 0x89, 0x8e, 0xaa, 0x4c, 0xaa, 0xa3, 0x1a, 0x1f, - 0x28, 0x00, 0x66, 0x53, 0x70, 0xec, 0xe5, 0x3e, 0x63, 0x11, 0x2b, 0x91, 0x5d, 0xb9, 0xc2, 0xb9, - 0x36, 0x5e, 0xe1, 0x44, 0x27, 0xe8, 0x64, 0x12, 0x47, 0x2c, 0xef, 0xd9, 0x3c, 0xcc, 0x4c, 0x20, - 0x71, 0x22, 0x7f, 0x9e, 0x4c, 0xe2, 0xc4, 0x78, 0x8e, 0x97, 0x38, 0xff, 0x2a, 0x80, 0xa5, 0x08, - 0x3c, 0xb1, 0xc4, 0x91, 0x98, 0x3c, 0x7d, 0x9c, 0x99, 0x4c, 0x76, 0x44, 0x4b, 0xf7, 0x25, 0x91, - 0x1d, 0x91, 0x43, 0x39, 0xb2, 0xe3, 0x77, 0x85, 0xb8, 0xd7, 0x27, 0x94, 0x1d, 0x9f, 0xc1, 0x53, - 0xc5, 0x57, 0x4e, 0xb9, 0x34, 0x3e, 0x2a, 0x82, 0xf3, 0xe9, 0x14, 0x4c, 0xd4, 0x41, 0x65, 0x6c, - 0x1d, 0xdc, 0x05, 0xcb, 0xf7, 0x3d, 0x5d, 0x1f, 0xf0, 0x39, 0xc4, 0x8a, 0xa1, 0x5f, 0x41, 0xbf, - 0x29, 0x2c, 0x97, 0x7f, 0x20, 0xc1, 0x20, 0xa9, 0x65, 0xb6, 0x2c, 0x96, 0x9e, 0xb4, 0x2c, 0x96, - 0x4f, 0x51, 0x16, 0xe5, 0xca, 0xa2, 0x78, 0x2a, 0x65, 0x31, 0x71, 0x4d, 0x94, 0x1c, 0x57, 0x63, - 0xef, 0xf0, 0x23, 0x05, 0xac, 0xc8, 0xaf, 0xcf, 0x50, 0x07, 0x0b, 0x06, 0x7e, 0x10, 0x7f, 0xbc, - 0x18, 0x57, 0x30, 0x3c, 0xaa, 0xe9, 0x4d, 0xff, 0xeb, 0x4e, 0xf3, 0x2d, 0x93, 0xee, 0x38, 0x7b, - 0xd4, 0xd1, 0xcc, 0x9e, 0x5f, 0x60, 0xb7, 0x12, 0x5c, 0x28, 0xc5, 0x0d, 0xef, 0x81, 0x8a, 0x81, - 0x1f, 0xec, 0x79, 0x4e, 0x2f, 0x28, 0x84, 0x27, 0x1f, 0x87, 0xc7, 0xfe, 0x96, 0x60, 0x41, 0x21, - 0x5f, 0xe3, 0x53, 0x05, 0xac, 0xe6, 0x54, 0xd0, 0xaf, 0xd1, 0x2c, 0x77, 0xc0, 0xe5, 0xc4, 0x24, - 0x59, 0x42, 0x92, 0xfb, 0x9e, 0xce, 0x73, 0x53, 0xe8, 0x95, 0x6b, 0x60, 0xd6, 0xc6, 0x0e, 0xd5, - 0x42, 0xa1, 0x5b, 0x6e, 0xcf, 0x8f, 0x86, 0xf5, 0xd9, 0xdd, 0xa0, 0x11, 0x45, 0xfd, 0x8d, 0x5f, - 0x17, 0xc0, 0x5c, 0x8c, 0xe4, 0x0c, 0xb4, 0xc3, 0x1b, 0x09, 0xed, 0x20, 0xfd, 0x1a, 0x13, 0x9f, - 0x55, 0x9e, 0x78, 0xd8, 0x4a, 0x89, 0x87, 0xef, 0x8c, 0x23, 0x3a, 0x5e, 0x3d, 0xfc, 0xbb, 0x00, - 0x96, 0x63, 0xe8, 0x48, 0x3e, 0x7c, 0x3f, 0x21, 0x1f, 0xd6, 0x52, 0xf2, 0xa1, 0x2a, 0xb3, 0x79, - 0xaa, 0x1f, 0xc6, 0xeb, 0x87, 0x3f, 0x2a, 0x60, 0x31, 0xb6, 0x76, 0x67, 0x20, 0x20, 0x36, 0x92, - 0x02, 0xa2, 0x3e, 0x26, 0x5e, 0x72, 0x14, 0xc4, 0x7f, 0x14, 0xd0, 0x8a, 0xa1, 0x76, 0x89, 0xe3, - 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x6b, 0xe9, 0x9e, 0x41, 0x3a, 0x3a, 0xd6, 0x0c, 0x44, 0x58, 0x83, - 0x66, 0x99, 0xbb, 0x96, 0xae, 0xa9, 0x03, 0x88, 0xc1, 0xdc, 0xfb, 0x87, 0xc4, 0xdc, 0x20, 0x3a, - 0xa1, 0xe2, 0x9b, 0xc1, 0x6c, 0xfb, 0xf5, 0xe0, 0x09, 0xfd, 0x9d, 0xa8, 0xeb, 0xf1, 0xb0, 0xbe, - 0x36, 0x09, 0x23, 0x0f, 0xb0, 0x38, 0x27, 0xfc, 0x19, 0x00, 0xec, 0xe7, 0x9e, 0x8a, 0x83, 0x2f, - 0x08, 0xb3, 0xed, 0xd7, 0x82, 0x34, 0x7c, 0x27, 0xec, 0x39, 0xd1, 0x00, 0x31, 0xc6, 0xc6, 0x5f, - 0x67, 0x12, 0xdb, 0xf5, 0xb5, 0x7f, 0xb0, 0xf9, 0x05, 0x58, 0xee, 0x47, 0xab, 0x13, 0x00, 0x98, - 0xd0, 0x60, 0xb1, 0xf3, 0xbc, 0x94, 0x5e, 0xb6, 0xae, 0x91, 0xbc, 0xb9, 0x2b, 0xa1, 0x43, 0xd2, - 0x41, 0xe0, 0xcb, 0x60, 0x8e, 0x09, 0x04, 0x4d, 0x25, 0xdb, 0xd8, 0x08, 0x52, 0x29, 0xfc, 0xe4, - 0xb2, 0x17, 0x75, 0xa1, 0x38, 0x0e, 0x1e, 0x82, 0x25, 0xdb, 0xea, 0x6e, 0x61, 0x13, 0xf7, 0x08, - 0x2b, 0x7b, 0xfe, 0x56, 0xf2, 0xa7, 0x9c, 0xd9, 0xf6, 0x2b, 0xc1, 0x35, 0x7d, 0x37, 0x0b, 0x61, - 0xd7, 0x20, 0x49, 0x33, 0x0f, 0x02, 0x19, 0x25, 0x34, 0x32, 0x5f, 0x08, 0x67, 0x32, 0xff, 0x56, - 0x21, 0xcb, 0xa9, 0x53, 0x7e, 0x23, 0xcc, 0x7b, 0xa4, 0xaa, 0x9c, 0xea, 0x91, 0x4a, 0x22, 0xe3, - 0x67, 0x4f, 0x28, 0xe3, 0x3f, 0x52, 0xc0, 0x15, 0x7b, 0x82, 0x34, 0xaa, 0x02, 0xbe, 0x2c, 0x9d, - 0x31, 0xcb, 0x32, 0x49, 0x46, 0xb6, 0xd7, 
0x46, 0xc3, 0xfa, 0x95, 0x49, 0x90, 0x68, 0x22, 0xd7, - 0x1a, 0x1f, 0x94, 0xc1, 0x85, 0x4c, 0x79, 0xfc, 0x02, 0x5f, 0xcb, 0x32, 0x9a, 0xbe, 0x78, 0x02, - 0x4d, 0xbf, 0x0e, 0x16, 0xc5, 0x27, 0xd6, 0xd4, 0x95, 0x20, 0xdc, 0xd3, 0x4e, 0xb2, 0x1b, 0xa5, - 0xf1, 0xb2, 0xd7, 0xba, 0xf2, 0x09, 0x5f, 0xeb, 0xe2, 0x5e, 0x88, 0xff, 0x0c, 0xf2, 0x93, 0x2f, - 0xeb, 0x85, 0xf8, 0x07, 0xa1, 0x34, 0x1e, 0xbe, 0x16, 0x64, 0x56, 0xc8, 0x30, 0xc3, 0x19, 0x52, - 0xa9, 0x12, 0x12, 0xa4, 0xd0, 0x4f, 0xf4, 0x19, 0xf1, 0x5d, 0xc9, 0x67, 0xc4, 0xb5, 0x31, 0xa1, - 0x3b, 0xf9, 0xc3, 0x9c, 0xf4, 0xda, 0x35, 0x77, 0xf2, 0x6b, 0x57, 0xe3, 0x2f, 0x0a, 0x78, 0x26, - 0xf7, 0x4c, 0x81, 0xeb, 0x09, 0xb9, 0x76, 0x3d, 0x25, 0xd7, 0x9e, 0xcd, 0x35, 0x8c, 0x69, 0x36, - 0x43, 0xfe, 0x66, 0x77, 0x6b, 0xec, 0x9b, 0x9d, 0x44, 0x8c, 0x8f, 0x7f, 0xbc, 0x6b, 0xaf, 0x3d, - 0x7c, 0x54, 0x9b, 0xfa, 0xf8, 0x51, 0x6d, 0xea, 0x93, 0x47, 0xb5, 0xa9, 0x5f, 0x8e, 0x6a, 0xca, - 0xc3, 0x51, 0x4d, 0xf9, 0x78, 0x54, 0x53, 0x3e, 0x19, 0xd5, 0x94, 0x7f, 0x8c, 0x6a, 0xca, 0x6f, - 0x3f, 0xad, 0x4d, 0xdd, 0x2b, 0xf4, 0x6f, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x7f, 0x6b, - 0xc0, 0xb5, 0x28, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/apps/v1/generated.proto", fileDescriptor_5b781835628d5338) +} + +var fileDescriptor_5b781835628d5338 = []byte{ + // 2194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0xf2, 0x43, 0xa2, 0x86, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x1b, 0xd2, 0xdd, 0xb8, + 0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0xe0, 0x14, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab, + 0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x68, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x0b, + 0xbd, 0x14, 0x05, 0x7a, 0xeb, 0xa1, 0x7f, 0x43, 0xff, 0x81, 0xa2, 0x28, 0x9a, 0x5b, 0x10, 0x04, + 0xbd, 0xf8, 0x52, 0x20, 0xe8, 0xa5, 0x39, 0x11, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x2f, + 0x2d, 0x66, 0x76, 0xf6, 0x7b, 0x56, 0xa4, 0xe4, 0x58, 0x69, 0x82, 0xdc, 0xb8, 0x33, 0xbf, 0xf7, + 0xdb, 0x37, 0x33, 0xef, 0xcd, 0xfb, 0xcd, 0x2c, 0x81, 0x7a, 0xff, 0x55, 0xaf, 0xa9, 0xdb, 0x2d, + 0xec, 0xe8, 0x2d, 0xec, 0x38, 0x5e, 0xeb, 0xe0, 0x7a, 0xab, 0x4f, 0x2c, 0xe2, 0x62, 0x4a, 0x7a, + 0x4d, 0xc7, 0xb5, 0xa9, 0x0d, 0xa1, 0x8f, 0x69, 0x62, 0x47, 0x6f, 0x32, 0x4c, 0xf3, 0xe0, 0xfa, + 0xf9, 0x6b, 0x7d, 0x9d, 0xee, 0x0f, 0xf6, 0x9a, 0x9a, 0x6d, 0xb6, 0xfa, 0x76, 0xdf, 0x6e, 0x71, + 0xe8, 0xde, 0xe0, 0x1e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x4f, 0x71, 0x3e, 0xfe, 0x1a, 0xcd, 0x76, + 0x89, 0xe4, 0x35, 0xe7, 0x6f, 0x46, 0x18, 0x13, 0x6b, 0xfb, 0xba, 0x45, 0xdc, 0xc3, 0x96, 0x73, + 0xbf, 0xcf, 0x1a, 0xbc, 0x96, 0x49, 0x28, 0x96, 0x59, 0xb5, 0xf2, 0xac, 0xdc, 0x81, 0x45, 0x75, + 0x93, 0x64, 0x0c, 0x5e, 0x19, 0x67, 0xe0, 0x69, 0xfb, 0xc4, 0xc4, 0x19, 0xbb, 0x97, 0xf2, 0xec, + 0x06, 0x54, 0x37, 0x5a, 0xba, 0x45, 0x3d, 0xea, 0xa6, 0x8d, 0xd4, 0xff, 0x28, 0x00, 0x76, 0x6c, + 0x8b, 0xba, 0xb6, 0x61, 0x10, 0x17, 0x91, 0x03, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x1c, 0x54, 0xd8, + 0x78, 0x7a, 0x98, 0xe2, 0x9a, 0x72, 0x51, 0x59, 0xad, 0xde, 0xf8, 0x5e, 0x33, 0x9a, 0xe4, 0x90, + 0xbe, 0xe9, 0xdc, 0xef, 0xb3, 0x06, 0xaf, 0xc9, 0xd0, 0xcd, 0x83, 0xeb, 0xcd, 0xed, 0xbd, 0xf7, + 0x89, 0x46, 0x37, 0x09, 0xc5, 0x6d, 0xf8, 0x70, 0xd8, 0x98, 0x1a, 0x0d, 0x1b, 0x20, 0x6a, 0x43, + 0x21, 0x2b, 0xdc, 0x06, 0x25, 0xce, 0x5e, 0xe0, 0xec, 0xd7, 0x72, 0xd9, 0xc5, 0xa0, 0x9b, 0x08, + 0x7f, 0xf0, 0xe6, 0x03, 0x4a, 0x2c, 0xe6, 0x5e, 0xfb, 0x8c, 0xa0, 0x2e, 0xad, 0x63, 0x8a, 0x11, + 0x27, 0x82, 0x2f, 0x82, 0x8a, 0x2b, 
0xdc, 0xaf, 0x15, 0x2f, 0x2a, 0xab, 0xc5, 0xf6, 0x59, 0x81, + 0xaa, 0x04, 0xc3, 0x42, 0x21, 0x42, 0xfd, 0xb3, 0x02, 0x96, 0xb3, 0xe3, 0xde, 0xd0, 0x3d, 0x0a, + 0xdf, 0xcb, 0x8c, 0xbd, 0x39, 0xd9, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xf8, 0xe2, 0xa0, 0x25, 0x36, + 0xee, 0x77, 0x40, 0x59, 0xa7, 0xc4, 0xf4, 0x6a, 0x85, 0x8b, 0xc5, 0xd5, 0xea, 0x8d, 0xcb, 0xcd, + 0x6c, 0xec, 0x36, 0xb3, 0x8e, 0xb5, 0xe7, 0x04, 0x65, 0xf9, 0x6d, 0x66, 0x8c, 0x7c, 0x0e, 0xf5, + 0xbf, 0x0a, 0x98, 0x5d, 0xc7, 0xc4, 0xb4, 0xad, 0x2e, 0xa1, 0xa7, 0xb0, 0x68, 0x1d, 0x50, 0xf2, + 0x1c, 0xa2, 0x89, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xd0, 0x9d, 0xae, 0x43, 0xb4, 0x68, 0xa1, 0xd8, + 0x13, 0xe2, 0xc6, 0xf0, 0x1d, 0x30, 0xed, 0x51, 0x4c, 0x07, 0x1e, 0x5f, 0xa6, 0xea, 0x8d, 0xe7, + 0x8e, 0xa6, 0xe1, 0xd0, 0xf6, 0xbc, 0x20, 0x9a, 0xf6, 0x9f, 0x91, 0xa0, 0x50, 0xff, 0x51, 0x00, + 0x30, 0xc4, 0x76, 0x6c, 0xab, 0xa7, 0x53, 0x16, 0xbf, 0xb7, 0x40, 0x89, 0x1e, 0x3a, 0x84, 0x4f, + 0xc3, 0x6c, 0xfb, 0x72, 0xe0, 0xc5, 0xed, 0x43, 0x87, 0x3c, 0x1e, 0x36, 0x96, 0xb3, 0x16, 0xac, + 0x07, 0x71, 0x1b, 0xb8, 0x11, 0xfa, 0x57, 0xe0, 0xd6, 0x37, 0x93, 0xaf, 0x7e, 0x3c, 0x6c, 0x48, + 0x36, 0x8b, 0x66, 0xc8, 0x94, 0x74, 0x10, 0x1e, 0x00, 0x68, 0x60, 0x8f, 0xde, 0x76, 0xb1, 0xe5, + 0xf9, 0x6f, 0xd2, 0x4d, 0x22, 0x46, 0xfe, 0xc2, 0x64, 0xcb, 0xc3, 0x2c, 0xda, 0xe7, 0x85, 0x17, + 0x70, 0x23, 0xc3, 0x86, 0x24, 0x6f, 0x80, 0x97, 0xc1, 0xb4, 0x4b, 0xb0, 0x67, 0x5b, 0xb5, 0x12, + 0x1f, 0x45, 0x38, 0x81, 0x88, 0xb7, 0x22, 0xd1, 0x0b, 0x9f, 0x07, 0x33, 0x26, 0xf1, 0x3c, 0xdc, + 0x27, 0xb5, 0x32, 0x07, 0x2e, 0x08, 0xe0, 0xcc, 0xa6, 0xdf, 0x8c, 0x82, 0x7e, 0xf5, 0x0f, 0x0a, + 0x98, 0x0b, 0x67, 0xee, 0x14, 0x52, 0xa5, 0x9d, 0x4c, 0x95, 0x67, 0x8f, 0x8c, 0x93, 0x9c, 0x0c, + 0xf9, 0xb8, 0x18, 0xf3, 0x99, 0x05, 0x21, 0xfc, 0x29, 0xa8, 0x78, 0xc4, 0x20, 0x1a, 0xb5, 0x5d, + 0xe1, 0xf3, 0x4b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xd1, 0x15, 0xa6, 0xed, 0x33, 0xcc, 0xe9, 0xe0, + 0x09, 0x85, 0x94, 0xf0, 0xc7, 0xa0, 0x42, 0x89, 0xe9, 0x18, 0x98, 0x12, 0x91, 0x26, 0x89, 0xf8, + 0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xec, 0xde, 0x6d, 0x01, 0xe3, 0x89, 0x12, 0xce, 0x43, 0xd0, 0x8a, + 0x42, 0x1a, 0x78, 0x1f, 0xcc, 0x0f, 0x9c, 0x1e, 0x43, 0x52, 0xb6, 0x75, 0xf7, 0x0f, 0x45, 0xf8, + 0x5c, 0x3d, 0x72, 0x42, 0x76, 0x13, 0x26, 0xed, 0x65, 0xf1, 0x82, 0xf9, 0x64, 0x3b, 0x4a, 0x51, + 0xc3, 0x35, 0xb0, 0x60, 0xea, 0x16, 0x22, 0xb8, 0x77, 0xd8, 0x25, 0x9a, 0x6d, 0xf5, 0x3c, 0x1e, + 0x40, 0xe5, 0xf6, 0x8a, 0x20, 0x58, 0xd8, 0x4c, 0x76, 0xa3, 0x34, 0x1e, 0x6e, 0x80, 0xa5, 0x60, + 0x9f, 0xfd, 0xa1, 0xee, 0x51, 0xdb, 0x3d, 0xdc, 0xd0, 0x4d, 0x9d, 0xd6, 0xa6, 0x39, 0x4f, 0x6d, + 0x34, 0x6c, 0x2c, 0x21, 0x49, 0x3f, 0x92, 0x5a, 0xa9, 0xbf, 0x99, 0x06, 0x0b, 0xa9, 0xdd, 0x00, + 0xde, 0x01, 0xcb, 0xda, 0xc0, 0x75, 0x89, 0x45, 0xb7, 0x06, 0xe6, 0x1e, 0x71, 0xbb, 0xda, 0x3e, + 0xe9, 0x0d, 0x0c, 0xd2, 0xe3, 0x2b, 0x5a, 0x6e, 0xd7, 0x85, 0xaf, 0xcb, 0x1d, 0x29, 0x0a, 0xe5, + 0x58, 0xc3, 0x1f, 0x01, 0x68, 0xf1, 0xa6, 0x4d, 0xdd, 0xf3, 0x42, 0xce, 0x02, 0xe7, 0x0c, 0x13, + 0x70, 0x2b, 0x83, 0x40, 0x12, 0x2b, 0xe6, 0x63, 0x8f, 0x78, 0xba, 0x4b, 0x7a, 0x69, 0x1f, 0x8b, + 0x49, 0x1f, 0xd7, 0xa5, 0x28, 0x94, 0x63, 0x0d, 0x5f, 0x06, 0x55, 0xff, 0x6d, 0x7c, 0xce, 0xc5, + 0xe2, 0x2c, 0x0a, 0xb2, 0xea, 0x56, 0xd4, 0x85, 0xe2, 0x38, 0x36, 0x34, 0x7b, 0xcf, 0x23, 0xee, + 0x01, 0xe9, 0xbd, 0xe5, 0x6b, 0x00, 0x56, 0x28, 0xcb, 0xbc, 0x50, 0x86, 0x43, 0xdb, 0xce, 0x20, + 0x90, 0xc4, 0x8a, 0x0d, 0xcd, 0x8f, 0x9a, 0xcc, 0xd0, 0xa6, 0x93, 0x43, 0xdb, 0x95, 0xa2, 0x50, + 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xb5, 0x03, 0xac, 0x1b, 
0x78, 0xcf, 0x20, 0xb5, 0x99, 0x64, + 0xec, 0x6d, 0x25, 0xbb, 0x51, 0x1a, 0x0f, 0xdf, 0x02, 0xe7, 0xfc, 0xa6, 0x5d, 0x0b, 0x87, 0x24, + 0x15, 0x4e, 0xf2, 0x8c, 0x20, 0x39, 0xb7, 0x95, 0x06, 0xa0, 0xac, 0x0d, 0xbc, 0x05, 0xe6, 0x35, + 0xdb, 0x30, 0x78, 0x3c, 0x76, 0xec, 0x81, 0x45, 0x6b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0xea, 0x24, + 0x7a, 0x50, 0x0a, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x81, 0xfc, 0x42, 0x9f, 0xad, + 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x63, 0x53, 0x3f, 0x56, 0xc0, 0x4a, 0x4e, 0x8e, 0xc3, + 0x37, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d, 0xcc, 0x31, + 0xdd, 0xa1, 0x5b, 0x7d, 0x1f, 0x22, 0x76, 0xb0, 0x17, 0x64, 0xbe, 0xa3, 0x38, 0x30, 0xda, 0x86, + 0xcf, 0x8d, 0x86, 0x8d, 0xb9, 0x44, 0x1f, 0x4a, 0x72, 0xaa, 0xbf, 0x2a, 0x00, 0xb0, 0x4e, 0x1c, + 0xc3, 0x3e, 0x34, 0x89, 0x75, 0x1a, 0xaa, 0x65, 0x3d, 0xa1, 0x5a, 0x54, 0xe9, 0x42, 0x84, 0xfe, + 0xe4, 0xca, 0x96, 0x8d, 0x94, 0x6c, 0xb9, 0x34, 0x86, 0xe7, 0x68, 0xdd, 0xf2, 0xb7, 0x22, 0x58, + 0x8c, 0xc0, 0x91, 0x70, 0x79, 0x2d, 0xb1, 0x84, 0x57, 0x52, 0x4b, 0xb8, 0x22, 0x31, 0x79, 0x6a, + 0xca, 0xe5, 0x7d, 0x30, 0xcf, 0x74, 0x85, 0xbf, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5, 0x84, + 0x55, 0x67, 0x23, 0xc1, 0x84, 0x52, 0xcc, 0x39, 0x2a, 0x69, 0xe6, 0xab, 0xa8, 0x92, 0xfe, 0xa8, + 0x80, 0xf9, 0x68, 0x99, 0x4e, 0x41, 0x26, 0x75, 0x92, 0x32, 0xa9, 0x7e, 0x74, 0x5c, 0xe6, 0xe8, + 0xa4, 0xbf, 0x96, 0xe2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0x8e, 0xa1, 0x6b, 0xd8, 0x13, + 0x65, 0xf5, 0x8c, 0x7f, 0x98, 0xf2, 0xdb, 0x50, 0xd8, 0x9b, 0x90, 0x54, 0x85, 0xa7, 0x2b, 0xa9, + 0x8a, 0x5f, 0x8c, 0xa4, 0xba, 0x0d, 0x2a, 0x5e, 0x20, 0xa6, 0x4a, 0x9c, 0xf2, 0xf2, 0xb8, 0x74, + 0x16, 0x3a, 0x2a, 0x64, 0x0d, 0x15, 0x54, 0xc8, 0x24, 0xd3, 0x4e, 0xe5, 0x2f, 0x53, 0x3b, 0xb1, + 0xf0, 0x76, 0xf0, 0xc0, 0x23, 0x3d, 0x9e, 0x4a, 0x95, 0x28, 0xbc, 0x77, 0x78, 0x2b, 0x12, 0xbd, + 0x70, 0x17, 0xac, 0x38, 0xae, 0xdd, 0x77, 0x89, 0xe7, 0xad, 0x13, 0xdc, 0x33, 0x74, 0x8b, 0x04, + 0x03, 0xf0, 0xab, 0xde, 0x85, 0xd1, 0xb0, 0xb1, 0xb2, 0x23, 0x87, 0xa0, 0x3c, 0x5b, 0xf5, 0xa3, + 0x12, 0x38, 0x9b, 0xde, 0x11, 0x73, 0x84, 0x88, 0x72, 0x22, 0x21, 0xf2, 0x62, 0x2c, 0x44, 0x7d, + 0x95, 0x16, 0x3b, 0xf3, 0x67, 0xc2, 0x74, 0x0d, 0x2c, 0x08, 0xe1, 0x11, 0x74, 0x0a, 0x29, 0x16, + 0x2e, 0xcf, 0x6e, 0xb2, 0x1b, 0xa5, 0xf1, 0xf0, 0x35, 0x30, 0xe7, 0x72, 0x6d, 0x15, 0x10, 0xf8, + 0xfa, 0xe4, 0x5b, 0x82, 0x60, 0x0e, 0xc5, 0x3b, 0x51, 0x12, 0xcb, 0xb4, 0x49, 0x24, 0x39, 0x02, + 0x82, 0x52, 0x52, 0x9b, 0xac, 0xa5, 0x01, 0x28, 0x6b, 0x03, 0x37, 0xc1, 0xe2, 0xc0, 0xca, 0x52, + 0xf9, 0xb1, 0x76, 0x41, 0x50, 0x2d, 0xee, 0x66, 0x21, 0x48, 0x66, 0x07, 0x7f, 0x92, 0x90, 0x2b, + 0xd3, 0x7c, 0x17, 0xb9, 0x72, 0x74, 0x3a, 0x4c, 0xac, 0x57, 0x24, 0x3a, 0xaa, 0x32, 0xa9, 0x8e, + 0x52, 0x3f, 0x54, 0x00, 0xcc, 0xa6, 0xe0, 0xd8, 0xc3, 0x7d, 0xc6, 0x22, 0x56, 0x22, 0x7b, 0x72, + 0x85, 0x73, 0x75, 0xbc, 0xc2, 0x89, 0x76, 0xd0, 0xc9, 0x24, 0x8e, 0x98, 0xde, 0xd3, 0xb9, 0x98, + 0x99, 0x40, 0xe2, 0x44, 0xfe, 0x3c, 0x99, 0xc4, 0x89, 0xf1, 0x1c, 0x2d, 0x71, 0xfe, 0x59, 0x00, + 0x8b, 0x11, 0x78, 0x62, 0x89, 0x23, 0x31, 0xf9, 0xe6, 0x72, 0x66, 0x32, 0xd9, 0x11, 0x4d, 0xdd, + 0xff, 0x89, 0xec, 0x88, 0x1c, 0xca, 0x91, 0x1d, 0xbf, 0x2f, 0xc4, 0xbd, 0x3e, 0xa6, 0xec, 0xf8, + 0x02, 0xae, 0x2a, 0xbe, 0x72, 0xca, 0x45, 0xfd, 0xa4, 0x08, 0xce, 0xa6, 0x53, 0x30, 0x51, 0x07, + 0x95, 0xb1, 0x75, 0x70, 0x07, 0x2c, 0xdd, 0x1b, 0x18, 0xc6, 0x21, 0x1f, 0x43, 0xac, 0x18, 0xfa, + 0x15, 0xf4, 0xdb, 0xc2, 0x72, 0xe9, 0x07, 0x12, 0x0c, 0x92, 0x5a, 0x66, 0xcb, 0x62, 
0xe9, 0x49, + 0xcb, 0x62, 0xf9, 0x04, 0x65, 0x51, 0xae, 0x2c, 0x8a, 0x27, 0x52, 0x16, 0x13, 0xd7, 0x44, 0xc9, + 0x76, 0x35, 0xf6, 0x0c, 0x3f, 0x52, 0xc0, 0xb2, 0xfc, 0xf8, 0x0c, 0x0d, 0x30, 0x6f, 0xe2, 0x07, + 0xf1, 0xcb, 0x8b, 0x71, 0x05, 0x63, 0x40, 0x75, 0xa3, 0xe9, 0x7f, 0xdd, 0x69, 0xbe, 0x6d, 0xd1, + 0x6d, 0xb7, 0x4b, 0x5d, 0xdd, 0xea, 0xfb, 0x05, 0x76, 0x33, 0xc1, 0x85, 0x52, 0xdc, 0xf0, 0x2e, + 0xa8, 0x98, 0xf8, 0x41, 0x77, 0xe0, 0xf6, 0x83, 0x42, 0x78, 0xfc, 0xf7, 0xf0, 0xd8, 0xdf, 0x14, + 0x2c, 0x28, 0xe4, 0x53, 0x3f, 0x57, 0xc0, 0x4a, 0x4e, 0x05, 0xfd, 0x1a, 0x8d, 0xf2, 0x23, 0x05, + 0x5c, 0x4c, 0x8c, 0x92, 0x65, 0x24, 0xb9, 0x37, 0x30, 0x78, 0x72, 0x0a, 0xc1, 0x72, 0x15, 0xcc, + 0x3a, 0xd8, 0xa5, 0x7a, 0xa8, 0x74, 0xcb, 0xed, 0xb9, 0xd1, 0xb0, 0x31, 0xbb, 0x13, 0x34, 0xa2, + 0xa8, 0x5f, 0x32, 0x37, 0x85, 0xa7, 0x37, 0x37, 0xea, 0xaf, 0x0b, 0xa0, 0x1a, 0x73, 0xf9, 0x14, + 0xa4, 0xca, 0x9b, 0x09, 0xa9, 0x22, 0xfd, 0xf8, 0x13, 0x9f, 0xc3, 0x3c, 0xad, 0xb2, 0x99, 0xd2, + 0x2a, 0xdf, 0x1d, 0x47, 0x74, 0xb4, 0x58, 0xf9, 0x57, 0x01, 0x2c, 0xc5, 0xd0, 0x91, 0x5a, 0xf9, + 0x7e, 0x42, 0xad, 0xac, 0xa6, 0xd4, 0x4a, 0x4d, 0x66, 0xf3, 0x8d, 0x5c, 0x19, 0x2f, 0x57, 0xfe, + 0xa4, 0x80, 0x85, 0xd8, 0xdc, 0x9d, 0x82, 0x5e, 0x59, 0x4f, 0xea, 0x95, 0xc6, 0x98, 0x78, 0xc9, + 0x11, 0x2c, 0xb7, 0xc0, 0x62, 0x0c, 0xb4, 0xed, 0xf6, 0x74, 0x0b, 0x1b, 0x1e, 0x7c, 0x0e, 0x94, + 0x3d, 0x8a, 0x5d, 0x1a, 0x64, 0x77, 0x60, 0xdb, 0x65, 0x8d, 0xc8, 0xef, 0x53, 0xff, 0xad, 0x80, + 0x56, 0xcc, 0x78, 0x87, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x3b, 0xb6, 0x31, 0x30, 0x49, 0xc7, + 0xc0, 0xba, 0x89, 0x08, 0x6b, 0xd0, 0x6d, 0x6b, 0xc7, 0x36, 0x74, 0xed, 0x10, 0x62, 0x50, 0xfd, + 0x60, 0x9f, 0x58, 0xeb, 0xc4, 0x20, 0x54, 0x7c, 0xde, 0x98, 0x6d, 0xbf, 0x11, 0xdc, 0xf6, 0xbf, + 0x1b, 0x75, 0x3d, 0x1e, 0x36, 0x56, 0x27, 0x61, 0xe4, 0xc1, 0x19, 0xe7, 0x84, 0x3f, 0x03, 0x80, + 0x3d, 0x76, 0x35, 0x1c, 0x7c, 0xec, 0x98, 0x6d, 0xbf, 0x1e, 0xa4, 0xf0, 0xbb, 0x61, 0xcf, 0xb1, + 0x5e, 0x10, 0x63, 0x54, 0x7f, 0x57, 0x49, 0x2c, 0xf5, 0xd7, 0xfe, 0x6e, 0xe9, 0x17, 0x60, 0xe9, + 0x20, 0x9a, 0x9d, 0x00, 0xc0, 0x34, 0x11, 0x8b, 0xbb, 0xe7, 0xa5, 0xf4, 0xb2, 0x79, 0x8d, 0x94, + 0xd8, 0x1d, 0x09, 0x1d, 0x92, 0xbe, 0x04, 0xbe, 0x0c, 0xaa, 0x4c, 0xcb, 0xe8, 0x1a, 0xd9, 0xc2, + 0x66, 0x90, 0x86, 0xe1, 0xd7, 0xa1, 0x6e, 0xd4, 0x85, 0xe2, 0x38, 0xb8, 0x0f, 0x16, 0x1d, 0xbb, + 0xb7, 0x89, 0x2d, 0xdc, 0x27, 0xac, 0x42, 0xfb, 0x4b, 0xc9, 0x6f, 0x9d, 0x66, 0xdb, 0xaf, 0x04, + 0x37, 0x0a, 0x3b, 0x59, 0x08, 0x3b, 0xb1, 0x49, 0x9a, 0x79, 0x10, 0xc8, 0x28, 0xa1, 0x99, 0xf9, + 0x98, 0x39, 0x93, 0xf9, 0x07, 0x88, 0x2c, 0x1f, 0x4f, 0xf8, 0x39, 0x33, 0xef, 0x3e, 0xad, 0x72, + 0xa2, 0xfb, 0x34, 0xc9, 0x89, 0x63, 0xf6, 0x98, 0x27, 0x8e, 0x4f, 0x14, 0x70, 0xc9, 0x99, 0x20, + 0x8d, 0x6a, 0x80, 0x4f, 0x4b, 0x67, 0xcc, 0xb4, 0x4c, 0x92, 0x91, 0xed, 0xd5, 0xd1, 0xb0, 0x71, + 0x69, 0x12, 0x24, 0x9a, 0xc8, 0x35, 0x96, 0x34, 0xb6, 0xd8, 0xf9, 0x6a, 0x55, 0xee, 0xe6, 0x95, + 0x31, 0x6e, 0x06, 0x1b, 0xa5, 0x9f, 0x87, 0xc1, 0x13, 0x0a, 0x69, 0xd4, 0x0f, 0xcb, 0xe0, 0x5c, + 0xa6, 0x5a, 0x7f, 0x89, 0x77, 0x85, 0x99, 0x13, 0x4d, 0xf1, 0x18, 0x27, 0x9a, 0x35, 0xb0, 0x20, + 0x3e, 0x30, 0xa7, 0x0e, 0x44, 0x61, 0x98, 0x74, 0x92, 0xdd, 0x28, 0x8d, 0x97, 0xdd, 0x55, 0x96, + 0x8f, 0x79, 0x57, 0x19, 0xf7, 0x42, 0xfc, 0x2f, 0xca, 0xcf, 0xe7, 0xac, 0x17, 0xe2, 0xef, 0x51, + 0x69, 0x3c, 0x7c, 0x3d, 0x48, 0xd6, 0x90, 0x61, 0x86, 0x33, 0xa4, 0xb2, 0x2f, 0x24, 0x48, 0xa1, + 0x9f, 0xe8, 0x23, 0xea, 0x7b, 0x92, 0x8f, 0xa8, 0xab, 0x63, 0xc2, 0x6c, 0xf2, 0x6b, 0x49, 0xe9, + 0xa1, 0xb3, 
0x7a, 0xfc, 0x43, 0xa7, 0xfa, 0x17, 0x05, 0x3c, 0x93, 0xbb, 0x4d, 0xc1, 0xb5, 0x84, + 0x7a, 0xbc, 0x96, 0x52, 0x8f, 0xcf, 0xe6, 0x1a, 0xc6, 0x24, 0xa4, 0x29, 0xbf, 0xb1, 0xbc, 0x39, + 0xf6, 0xc6, 0x52, 0x72, 0x12, 0x19, 0x7f, 0x75, 0xd9, 0x7e, 0xf5, 0xe1, 0xa3, 0xfa, 0xd4, 0xa7, + 0x8f, 0xea, 0x53, 0x9f, 0x3d, 0xaa, 0x4f, 0xfd, 0x72, 0x54, 0x57, 0x1e, 0x8e, 0xea, 0xca, 0xa7, + 0xa3, 0xba, 0xf2, 0xd9, 0xa8, 0xae, 0xfc, 0x7d, 0x54, 0x57, 0x7e, 0xfb, 0x79, 0x7d, 0xea, 0x2e, + 0xcc, 0xfe, 0x2b, 0xf3, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xfa, 0xed, 0x70, 0xaa, 0x29, + 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2168,6 +2200,18 @@ func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (in _ = i var l int _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.Partition != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) i-- @@ -2329,6 +2373,32 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetOrdinals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetOrdinals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetOrdinals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2382,6 +2452,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ordinals != nil { + { + size, err := m.Ordinals.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.PersistentVolumeClaimRetentionPolicy != nil { { size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) @@ -2984,6 +3066,10 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { if m.Partition != nil { n += 1 + sovGenerated(uint64(*m.Partition)) } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3038,6 +3124,16 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetOrdinals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Start)) + return n +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { if m == nil { return 0 @@ -3086,6 +3182,10 @@ func (m *StatefulSetSpec) Size() (n int) { l = m.PersistentVolumeClaimRetentionPolicy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Ordinals != nil { + l = m.Ordinals.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3453,6 +3553,7 @@ func (this *RollingUpdateStatefulSetStrategy) String() string { } s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `MaxUnavailable:` + 
strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -3499,6 +3600,16 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetOrdinals) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetOrdinals{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { if this == nil { return "nil" @@ -3530,6 +3641,7 @@ func (this *StatefulSetSpec) String() string { `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, + `Ordinals:` + strings.Replace(this.Ordinals.String(), "StatefulSetOrdinals", "StatefulSetOrdinals", 1) + `,`, `}`, }, "") return s @@ -7075,6 +7187,42 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } } m.Partition = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7573,6 +7721,75 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetOrdinals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetOrdinals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetOrdinals: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8011,6 +8228,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordinals", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ordinals == nil { + m.Ordinals = &StatefulSetOrdinals{} + } + if err := m.Ordinals.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 6977b1e8f..d864f2eeb 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/apps/v1"; // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain @@ -43,10 +43,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -56,7 +56,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -67,7 +67,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -93,7 +93,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -109,7 +109,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -121,14 +121,15 @@ message DaemonSetSpec { // Must match in order to be controlled. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -199,6 +200,8 @@ message DaemonSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DaemonSetCondition conditions = 10; } @@ -222,7 +225,7 @@ message Deployment { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -242,10 +245,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -258,7 +261,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -274,10 +277,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + // The only allowed template.spec.restartPolicy value is "Always". + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -339,6 +343,8 @@ message DeploymentStatus { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -370,7 +376,7 @@ message ReplicaSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -396,7 +402,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -412,7 +418,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -438,18 +444,18 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -473,6 +479,8 @@ message ReplicaSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicaSetCondition conditions = 6; } @@ -493,7 +501,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -513,9 +521,8 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -531,7 +538,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -545,16 +552,27 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. message RollingUpdateStatefulSetStrategy { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - // Default value is 0. + // Partition indicates the ordinal at which the StatefulSet should be partitioned + // for updates. During a rolling update, all pods from ordinal Replicas-1 to + // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. + // This is helpful in being able to do a canary based deployment. The default value is 0. // +optional optional int32 partition = 1; + + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding up. This can not be 0. + // Defaults to 1. This field is alpha-level and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to + // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it + // will be counted towards MaxUnavailable. + // +optional + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // StatefulSet represents a set of pods with consistent identities. 
@@ -568,7 +586,7 @@ message StatefulSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -590,7 +608,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -606,12 +624,27 @@ message StatefulSetList { // Standard list's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of stateful sets. repeated StatefulSet items = 2; } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +message StatefulSetOrdinals { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + optional int32 start = 1; +} + // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { @@ -642,13 +675,16 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -658,7 +694,8 @@ message StatefulSetSpec { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. 
// +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + // +listType=atomic + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -692,7 +729,6 @@ message StatefulSetSpec { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; @@ -702,8 +738,15 @@ message StatefulSetSpec { // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // which is beta. + // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. + // +optional + optional StatefulSetOrdinals ordinals = 11; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -745,10 +788,11 @@ message StatefulSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 6f910f23b..e942cd526 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -29,6 +29,7 @@ const ( DeprecatedRollbackTo = "deprecated.deployment.rollback.to" DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" + PodIndexLabel = "apps.kubernetes.io/pod-index" ) // +genclient @@ -36,6 +37,7 @@ const ( // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: @@ -112,11 +114,21 @@ const ( // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. type RollingUpdateStatefulSetStrategy struct { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. 
- // Default value is 0. + // Partition indicates the ordinal at which the StatefulSet should be partitioned + // for updates. During a rolling update, all pods from ordinal Replicas-1 to + // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. + // This is helpful in being able to do a canary based deployment. The default value is 0. // +optional Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding up. This can not be 0. + // Defaults to 1. This field is alpha-level and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to + // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it + // will be counted towards MaxUnavailable. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"` } // PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine @@ -153,6 +165,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct { WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +type StatefulSetOrdinals struct { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + Start int32 `json:"start" protobuf:"varint,1,opt,name=start"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -171,7 +198,10 @@ type StatefulSetSpec struct { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -182,6 +212,7 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional + // +listType=atomic VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` // serviceName is the name of the service that governs this StatefulSet. 
@@ -216,7 +247,6 @@ type StatefulSetSpec struct { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` @@ -226,8 +256,15 @@ type StatefulSetSpec struct { // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // which is beta. + // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. + // +optional + Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -269,10 +306,11 @@ type StatefulSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -297,6 +335,7 @@ type StatefulSetCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSetList is a collection of StatefulSets. type StatefulSetList struct { @@ -315,6 +354,7 @@ type StatefulSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { @@ -346,6 +386,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -471,6 +512,8 @@ type DeploymentStatus struct { // Represents the latest available observations of a deployment's current state. 
// +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -514,6 +557,7 @@ type DeploymentCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DeploymentList is a list of Deployments. type DeploymentList struct { @@ -589,7 +633,6 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -606,6 +649,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` @@ -678,6 +722,8 @@ type DaemonSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } @@ -704,6 +750,7 @@ type DaemonSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { @@ -735,6 +782,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { @@ -753,6 +801,7 @@ type DaemonSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { @@ -780,6 +829,7 @@ type ReplicaSet struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { @@ -824,7 +874,7 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` @@ -848,6 +898,8 @@ type ReplicaSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -880,6 +932,7 @@ type ReplicaSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain @@ -905,6 +958,7 @@ type ControllerRevision struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevisionList is a resource containing a list of ControllerRevision objects. type ControllerRevisionList struct { diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index ac4cf7611..f3e221a0e 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. 
Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -248,7 +248,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", @@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. 
Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { @@ -281,8 +281,9 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { } var map_RollingUpdateStatefulSetStrategy = map[string]string{ - "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", - "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. 
That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", } func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { @@ -323,6 +324,15 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetOrdinals = map[string]string{ + "": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "start": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. Replica indices will be in the range:\n [0, .spec.replicas).", +} + +func (StatefulSetOrdinals) SwaggerDoc() map[string]string { + return map_StatefulSetOrdinals +} + var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", @@ -337,14 +347,15 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. 
Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", - "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is beta.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -362,7 +373,7 @@ var map_StatefulSetStatus = map[string]string{ "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 8e4d4261a..6912986ac 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -597,6 +597,11 @@ func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateState *out = new(int32) **out = **in } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } return } @@ -688,6 +693,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals. +func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals { + if in == nil { + return nil + } + out := new(StatefulSetOrdinals) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { *out = *in @@ -736,6 +757,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) **out = **in } + if in.Ordinals != nil { + in, out := &in.Ordinals, &out.Ordinals + *out = new(StatefulSetOrdinals) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..34a036b62 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevision) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevisionList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DaemonSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DaemonSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Deployment) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StatefulSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *StatefulSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 74584223c..76e755b4a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto +// source: k8s.io/api/apps/v1beta1/generated.proto package v1beta1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{0} + return fileDescriptor_2747f709ac7c95e7, []int{0} } func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } func (*ControllerRevisionList) ProtoMessage() {} func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{1} + return fileDescriptor_2747f709ac7c95e7, []int{1} } func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{2} + return fileDescriptor_2747f709ac7c95e7, []int{2} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{3} + return fileDescriptor_2747f709ac7c95e7, []int{3} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{4} + return fileDescriptor_2747f709ac7c95e7, []int{4} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{5} + return fileDescriptor_2747f709ac7c95e7, []int{5} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{6} + return fileDescriptor_2747f709ac7c95e7, []int{6} } 
func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{7} + return fileDescriptor_2747f709ac7c95e7, []int{7} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{8} + return fileDescriptor_2747f709ac7c95e7, []int{8} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{9} + return fileDescriptor_2747f709ac7c95e7, []int{9} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +332,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{10} + return fileDescriptor_2747f709ac7c95e7, []int{10} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +360,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{11} + return fileDescriptor_2747f709ac7c95e7, []int{11} } func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +388,7 @@ var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{12} + return fileDescriptor_2747f709ac7c95e7, []int{12} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +416,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{13} + return fileDescriptor_2747f709ac7c95e7, []int{13} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +444,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{14} + return fileDescriptor_2747f709ac7c95e7, []int{14} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +472,7 @@ 
var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{15} + return fileDescriptor_2747f709ac7c95e7, []int{15} } func (m *StatefulSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +500,7 @@ var xxx_messageInfo_StatefulSet proto.InternalMessageInfo func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } func (*StatefulSetCondition) ProtoMessage() {} func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{16} + return fileDescriptor_2747f709ac7c95e7, []int{16} } func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +528,7 @@ var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{17} + return fileDescriptor_2747f709ac7c95e7, []int{17} } func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -553,12 +553,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } +func (*StatefulSetOrdinals) ProtoMessage() {} +func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { + return fileDescriptor_2747f709ac7c95e7, []int{18} +} +func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetOrdinals.Merge(m, src) +} +func (m *StatefulSetOrdinals) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetOrdinals) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} } func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{18} + return fileDescriptor_2747f709ac7c95e7, []int{19} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -586,7 +614,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{19} + return fileDescriptor_2747f709ac7c95e7, []int{20} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -614,7 +642,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{20} + return 
fileDescriptor_2747f709ac7c95e7, []int{21} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -642,7 +670,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{21} + return fileDescriptor_2747f709ac7c95e7, []int{22} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -688,6 +716,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1beta1.StatefulSetOrdinals") proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") @@ -695,134 +724,138 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) -} - -var fileDescriptor_2a07313e8f66e805 = []byte{ - // 1968 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x4a, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x49, 0x81, 0x35, - 0x12, 0xe5, 0x43, 0xcb, 0x58, 0x49, 0x83, 0xc4, 0x2e, 0xdc, 0x8a, 0x92, 0x1b, 0x3b, 0x90, 0x22, - 0x65, 0x24, 0xc5, 0x68, 0xfa, 0x81, 0x0c, 0xc9, 0x31, 0xb5, 0xd1, 0x7e, 0x61, 0x77, 0xc8, 0x98, - 0xe8, 0xa5, 0x7f, 0x40, 0x81, 0xf4, 0xdc, 0xbf, 0xa2, 0xb7, 0x16, 0xed, 0xad, 0x87, 0xc2, 0xc7, - 0xa0, 0x97, 0xa6, 0x17, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, 0xec, - 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xcd, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x9b, 0x99, 0xf7, 0x31, - 0xef, 0x47, 0xf8, 0xe1, 0xf9, 0xbb, 0xae, 0xaa, 0x59, 0xcd, 0xf3, 0x7e, 0x9b, 0x3a, 0x26, 0x65, - 0xd4, 0x6d, 0x0e, 0xa8, 0xd9, 0xb5, 0x9c, 0xa6, 0x14, 0x10, 0x5b, 0x6b, 0x12, 0xdb, 0x76, 0x9b, - 0x83, 0x5b, 0x6d, 0xca, 0xc8, 0xad, 0x66, 0x8f, 0x9a, 0xd4, 0x21, 0x8c, 0x76, 0x55, 0xdb, 0xb1, - 0x98, 0x85, 0xd6, 0x3c, 0x45, 0x95, 0xd8, 0x9a, 0xca, 0x15, 0x55, 0xa9, 0xb8, 0xbe, 0xd5, 0xd3, - 0xd8, 0x59, 0xbf, 0xad, 0x76, 0x2c, 0xa3, 0xd9, 0xb3, 0x7a, 0x56, 0x53, 0xe8, 0xb7, 0xfb, 0x8f, - 0xc4, 0x97, 0xf8, 0x10, 0xbf, 0x3c, 0x9c, 0xf5, 0x46, 0xc4, 0x61, 0xc7, 0x72, 0x68, 0x73, 0x90, - 0xf2, 0xb5, 0xfe, 0x76, 0xa8, 0x63, 0x90, 0xce, 0x99, 0x66, 0x52, 0x67, 0xd8, 0xb4, 0xcf, 0x7b, - 0x7c, 0xc1, 0x6d, 0x1a, 0x94, 0x91, 0x2c, 0xab, 0x66, 0x9e, 0x95, 0xd3, 0x37, 0x99, 0x66, 0xd0, - 0x94, 0xc1, 0x3b, 0x97, 0x19, 0xb8, 0x9d, 0x33, 0x6a, 0x90, 0x94, 0xdd, 0x5b, 0x79, 0x76, 0x7d, - 0xa6, 0xe9, 0x4d, 0xcd, 0x64, 0x2e, 0x73, 0x92, 0x46, 0x8d, 0x7f, 0x2b, 0x80, 0x76, 0x2d, 0x93, - 0x39, 0x96, 0xae, 0x53, 0x07, 0xd3, 0x81, 0xe6, 0x6a, 0x96, 0x89, 0x3e, 0x85, 0x32, 0x3f, 0x4f, - 0x97, 0x30, 0x52, 0x55, 0x36, 0x94, 0xcd, 0xc5, 0xed, 0x37, 0xd5, 0xf0, 0xa6, 0x03, 0x78, 0xd5, - 0x3e, 0xef, 0xf1, 0x05, 0x57, 0xe5, 0xda, 0xea, 0xe0, 0x96, 0x7a, 0xd8, 0xfe, 0x8c, 
0x76, 0xd8, - 0x01, 0x65, 0xa4, 0x85, 0x9e, 0x8c, 0xea, 0x53, 0xe3, 0x51, 0x1d, 0xc2, 0x35, 0x1c, 0xa0, 0xa2, - 0x43, 0x98, 0x15, 0xe8, 0xd3, 0x02, 0x7d, 0x2b, 0x17, 0x5d, 0x1e, 0x5a, 0xc5, 0xe4, 0xf3, 0x7b, - 0x8f, 0x19, 0x35, 0xf9, 0xf6, 0x5a, 0x2f, 0x48, 0xe8, 0xd9, 0x3d, 0xc2, 0x08, 0x16, 0x40, 0xe8, - 0x0d, 0x28, 0x3b, 0x72, 0xfb, 0xd5, 0x99, 0x0d, 0x65, 0x73, 0xa6, 0x75, 0x4d, 0x6a, 0x95, 0xfd, - 0x63, 0xe1, 0x40, 0xa3, 0xf1, 0x44, 0x81, 0x1b, 0xe9, 0x73, 0xef, 0x6b, 0x2e, 0x43, 0x3f, 0x4d, - 0x9d, 0x5d, 0x2d, 0x76, 0x76, 0x6e, 0x2d, 0x4e, 0x1e, 0x38, 0xf6, 0x57, 0x22, 0xe7, 0x3e, 0x82, - 0x92, 0xc6, 0xa8, 0xe1, 0x56, 0xa7, 0x37, 0x66, 0x36, 0x17, 0xb7, 0x5f, 0x57, 0x73, 0x12, 0x58, - 0x4d, 0xef, 0xae, 0xb5, 0x24, 0x71, 0x4b, 0x0f, 0x38, 0x02, 0xf6, 0x80, 0x1a, 0xbf, 0x9a, 0x06, - 0xd8, 0xa3, 0xb6, 0x6e, 0x0d, 0x0d, 0x6a, 0xb2, 0x2b, 0x08, 0xdd, 0x03, 0x98, 0x75, 0x6d, 0xda, - 0x91, 0xa1, 0x7b, 0x25, 0xf7, 0x04, 0xe1, 0xa6, 0x8e, 0x6d, 0xda, 0x09, 0x83, 0xc6, 0xbf, 0xb0, - 0x80, 0x40, 0x1f, 0xc1, 0x9c, 0xcb, 0x08, 0xeb, 0xbb, 0x22, 0x64, 0x8b, 0xdb, 0xaf, 0x16, 0x01, - 0x13, 0x06, 0xad, 0x8a, 0x84, 0x9b, 0xf3, 0xbe, 0xb1, 0x04, 0x6a, 0xfc, 0x75, 0x06, 0x56, 0x42, - 0xe5, 0x5d, 0xcb, 0xec, 0x6a, 0x8c, 0xa7, 0xf4, 0x1d, 0x98, 0x65, 0x43, 0x9b, 0x8a, 0x3b, 0x59, - 0x68, 0xbd, 0xe2, 0x6f, 0xe6, 0x64, 0x68, 0xd3, 0x67, 0xa3, 0xfa, 0x5a, 0x86, 0x09, 0x17, 0x61, - 0x61, 0x84, 0xf6, 0x83, 0x7d, 0x4e, 0x0b, 0xf3, 0xb7, 0xe3, 0xce, 0x9f, 0x8d, 0xea, 0x19, 0x0d, - 0x44, 0x0d, 0x90, 0xe2, 0x5b, 0x44, 0x9f, 0x41, 0x45, 0x27, 0x2e, 0x3b, 0xb5, 0xbb, 0x84, 0xd1, - 0x13, 0xcd, 0xa0, 0xd5, 0x39, 0x71, 0xfa, 0xd7, 0x8a, 0x05, 0x8a, 0x5b, 0xb4, 0x6e, 0xc8, 0x1d, - 0x54, 0xf6, 0x63, 0x48, 0x38, 0x81, 0x8c, 0x06, 0x80, 0xf8, 0xca, 0x89, 0x43, 0x4c, 0xd7, 0x3b, - 0x15, 0xf7, 0x37, 0x3f, 0xb1, 0xbf, 0x75, 0xe9, 0x0f, 0xed, 0xa7, 0xd0, 0x70, 0x86, 0x07, 0xf4, - 0x32, 0xcc, 0x39, 0x94, 0xb8, 0x96, 0x59, 0x9d, 0x15, 0x37, 0x16, 0x84, 0x0b, 0x8b, 0x55, 0x2c, - 0xa5, 0xe8, 0x55, 0x98, 0x37, 0xa8, 0xeb, 0x92, 0x1e, 0xad, 0x96, 0x84, 0xe2, 0xb2, 0x54, 0x9c, - 0x3f, 0xf0, 0x96, 0xb1, 0x2f, 0x6f, 0xfc, 0x5e, 0x81, 0x4a, 0x18, 0xa6, 0x2b, 0xa8, 0xd5, 0xfb, - 0xf1, 0x5a, 0xfd, 0x6e, 0x81, 0xe4, 0xcc, 0xa9, 0xd1, 0x7f, 0x4c, 0x03, 0x0a, 0x95, 0xb0, 0xa5, - 0xeb, 0x6d, 0xd2, 0x39, 0x47, 0x1b, 0x30, 0x6b, 0x12, 0xc3, 0xcf, 0xc9, 0xa0, 0x40, 0x3e, 0x24, - 0x06, 0xc5, 0x42, 0x82, 0xbe, 0x50, 0x00, 0xf5, 0x45, 0x34, 0xbb, 0x3b, 0xa6, 0x69, 0x31, 0xc2, - 0x2f, 0xd8, 0xdf, 0xd0, 0x6e, 0x81, 0x0d, 0xf9, 0xbe, 0xd4, 0xd3, 0x14, 0xca, 0x3d, 0x93, 0x39, - 0xc3, 0x30, 0xb0, 0x69, 0x05, 0x9c, 0xe1, 0x1a, 0xfd, 0x04, 0xc0, 0x91, 0x98, 0x27, 0x96, 0x2c, - 0xdb, 0xfc, 0x1e, 0xe0, 0xbb, 0xdf, 0xb5, 0xcc, 0x47, 0x5a, 0x2f, 0x6c, 0x2c, 0x38, 0x80, 0xc0, - 0x11, 0xb8, 0xf5, 0x7b, 0xb0, 0x96, 0xb3, 0x4f, 0x74, 0x0d, 0x66, 0xce, 0xe9, 0xd0, 0xbb, 0x2a, - 0xcc, 0x7f, 0xa2, 0x55, 0x28, 0x0d, 0x88, 0xde, 0xa7, 0x5e, 0x4d, 0x62, 0xef, 0xe3, 0xf6, 0xf4, - 0xbb, 0x4a, 0xe3, 0xb7, 0xa5, 0x68, 0xa6, 0xf0, 0x7e, 0x83, 0x36, 0xf9, 0xf3, 0x60, 0xeb, 0x5a, - 0x87, 0xb8, 0x02, 0xa3, 0xd4, 0x7a, 0xc1, 0x7b, 0x1a, 0xbc, 0x35, 0x1c, 0x48, 0xd1, 0xcf, 0xa0, - 0xec, 0x52, 0x9d, 0x76, 0x98, 0xe5, 0xc8, 0x16, 0xf7, 0x56, 0xc1, 0x9c, 0x22, 0x6d, 0xaa, 0x1f, - 0x4b, 0x53, 0x0f, 0xde, 0xff, 0xc2, 0x01, 0x24, 0xfa, 0x08, 0xca, 0x8c, 0x1a, 0xb6, 0x4e, 0x18, - 0x95, 0xb7, 0x17, 0xcb, 0x2b, 0xde, 0x3b, 0x38, 0xd8, 0x91, 0xd5, 0x3d, 0x91, 0x6a, 0xa2, 0x7b, - 0x06, 0x79, 0xea, 0xaf, 0xe2, 0x00, 0x06, 0xfd, 0x18, 0xca, 0x2e, 0xe3, 0xaf, 0x7a, 0x6f, 0x28, - 0xaa, 0xed, 
0xa2, 0x67, 0x25, 0xda, 0x47, 0x3d, 0x93, 0x10, 0xda, 0x5f, 0xc1, 0x01, 0x1c, 0xda, - 0x81, 0x65, 0x43, 0x33, 0x31, 0x25, 0xdd, 0xe1, 0x31, 0xed, 0x58, 0x66, 0xd7, 0x15, 0x65, 0x5a, - 0x6a, 0xad, 0x49, 0xa3, 0xe5, 0x83, 0xb8, 0x18, 0x27, 0xf5, 0xd1, 0x3e, 0xac, 0xfa, 0xcf, 0xee, - 0x7d, 0xcd, 0x65, 0x96, 0x33, 0xdc, 0xd7, 0x0c, 0x8d, 0x89, 0x9e, 0x57, 0x6a, 0x55, 0xc7, 0xa3, - 0xfa, 0x2a, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xde, 0x57, 0x6c, 0xd2, 0x77, 0x69, 0x57, 0xf4, 0xb0, - 0x72, 0xd8, 0x57, 0x8e, 0xc4, 0x2a, 0x96, 0x52, 0xf4, 0x30, 0x96, 0xa6, 0xe5, 0xc9, 0xd2, 0xb4, - 0x92, 0x9f, 0xa2, 0xe8, 0x14, 0xd6, 0x6c, 0xc7, 0xea, 0x39, 0xd4, 0x75, 0xf7, 0x28, 0xe9, 0xea, - 0x9a, 0x49, 0xfd, 0x9b, 0x59, 0x10, 0x27, 0x7a, 0x69, 0x3c, 0xaa, 0xaf, 0x1d, 0x65, 0xab, 0xe0, - 0x3c, 0xdb, 0xc6, 0x9f, 0x66, 0xe1, 0x5a, 0xf2, 0x8d, 0x43, 0x1f, 0x00, 0xb2, 0xda, 0x2e, 0x75, - 0x06, 0xb4, 0xfb, 0xbe, 0x37, 0xb8, 0xf1, 0xe9, 0x46, 0x11, 0xd3, 0x4d, 0x50, 0xb7, 0x87, 0x29, - 0x0d, 0x9c, 0x61, 0xe5, 0xcd, 0x47, 0xb2, 0x00, 0xa6, 0xc5, 0x46, 0x23, 0xf3, 0x51, 0xaa, 0x08, - 0x76, 0x60, 0x59, 0xd6, 0xbe, 0x2f, 0x14, 0xc9, 0x1a, 0x89, 0xfb, 0x69, 0x5c, 0x8c, 0x93, 0xfa, - 0xe8, 0x0e, 0x2c, 0x39, 0x3c, 0x0f, 0x02, 0x80, 0x79, 0x01, 0xf0, 0x2d, 0x09, 0xb0, 0x84, 0xa3, - 0x42, 0x1c, 0xd7, 0x45, 0xef, 0xc3, 0x75, 0x32, 0x20, 0x9a, 0x4e, 0xda, 0x3a, 0x0d, 0x00, 0x66, - 0x05, 0xc0, 0x8b, 0x12, 0xe0, 0xfa, 0x4e, 0x52, 0x01, 0xa7, 0x6d, 0xd0, 0x01, 0xac, 0xf4, 0xcd, - 0x34, 0x94, 0x97, 0xc4, 0x2f, 0x49, 0xa8, 0x95, 0xd3, 0xb4, 0x0a, 0xce, 0xb2, 0x43, 0x9f, 0x02, - 0x74, 0xfc, 0x57, 0xdd, 0xad, 0xce, 0x89, 0x36, 0xfc, 0x46, 0x81, 0x62, 0x0b, 0x46, 0x81, 0xb0, - 0x05, 0x06, 0x4b, 0x2e, 0x8e, 0x60, 0xa2, 0xdb, 0x50, 0xe9, 0x58, 0xba, 0x2e, 0x32, 0x7f, 0xd7, - 0xea, 0x9b, 0x4c, 0x24, 0x6f, 0xa9, 0x85, 0xf8, 0x63, 0xbf, 0x1b, 0x93, 0xe0, 0x84, 0x66, 0xe3, - 0x8f, 0x4a, 0xf4, 0x99, 0xf1, 0xcb, 0x19, 0xdd, 0x8e, 0x8d, 0x3e, 0x2f, 0x27, 0x46, 0x9f, 0x1b, - 0x69, 0x8b, 0xc8, 0xe4, 0xa3, 0xc1, 0x12, 0x4f, 0x7e, 0xcd, 0xec, 0x79, 0x01, 0x97, 0x2d, 0xf1, - 0xcd, 0x0b, 0x4b, 0x29, 0xd0, 0x8e, 0x3c, 0x8c, 0xd7, 0x45, 0xcc, 0xa3, 0x42, 0x1c, 0x47, 0x6e, - 0xdc, 0x85, 0x4a, 0xbc, 0x0e, 0x63, 0x33, 0xbd, 0x72, 0xe9, 0x4c, 0xff, 0xb5, 0x02, 0x6b, 0x39, - 0xde, 0x91, 0x0e, 0x15, 0x83, 0x3c, 0x8e, 0x84, 0xf9, 0xd2, 0xd9, 0x98, 0xb3, 0x26, 0xd5, 0x63, - 0x4d, 0xea, 0x03, 0x93, 0x1d, 0x3a, 0xc7, 0xcc, 0xd1, 0xcc, 0x9e, 0x17, 0x87, 0x83, 0x18, 0x16, - 0x4e, 0x60, 0xa3, 0x4f, 0xa0, 0x6c, 0x90, 0xc7, 0xc7, 0x7d, 0xa7, 0x97, 0x75, 0x5f, 0xc5, 0xfc, - 0x88, 0xf7, 0xe3, 0x40, 0xa2, 0xe0, 0x00, 0xaf, 0x71, 0x08, 0x1b, 0xb1, 0x43, 0xf2, 0x56, 0x41, - 0x1f, 0xf5, 0xf5, 0x63, 0x1a, 0x06, 0xfc, 0x75, 0x58, 0xb0, 0x89, 0xc3, 0xb4, 0xa0, 0x5d, 0x94, - 0x5a, 0x4b, 0xe3, 0x51, 0x7d, 0xe1, 0xc8, 0x5f, 0xc4, 0xa1, 0xbc, 0xf1, 0x1f, 0x05, 0x4a, 0xc7, - 0x1d, 0xa2, 0xd3, 0x2b, 0xa0, 0x0e, 0x7b, 0x31, 0xea, 0xd0, 0xc8, 0x4d, 0x22, 0xb1, 0x9f, 0x5c, - 0xd6, 0xb0, 0x9f, 0x60, 0x0d, 0x37, 0x2f, 0xc1, 0xb9, 0x98, 0x30, 0xbc, 0x07, 0x0b, 0x81, 0xbb, - 0x58, 0x97, 0x54, 0x2e, 0xeb, 0x92, 0x8d, 0xdf, 0x4c, 0xc3, 0x62, 0xc4, 0xc5, 0x64, 0xd6, 0xfc, - 0xba, 0x23, 0x83, 0x06, 0xef, 0x24, 0xdb, 0x45, 0x0e, 0xa2, 0xfa, 0x43, 0x85, 0x37, 0xbf, 0x85, - 0xaf, 0x77, 0x7a, 0xd6, 0xb8, 0x0b, 0x15, 0x46, 0x9c, 0x1e, 0x65, 0xbe, 0x4c, 0x5c, 0xd8, 0x42, - 0x48, 0x1e, 0x4e, 0x62, 0x52, 0x9c, 0xd0, 0x5e, 0xbf, 0x03, 0x4b, 0x31, 0x67, 0x13, 0x0d, 0x61, - 0x5f, 0xf0, 0xcb, 0x09, 0x93, 0xf3, 0x0a, 0xb2, 0xeb, 0x83, 0x58, 0x76, 0x6d, 0xe6, 0x5f, 0x66, - 0xa4, 0x64, 0xf2, 0x72, 0x0c, 0x27, 
0x72, 0xec, 0xb5, 0x42, 0x68, 0x17, 0x67, 0xda, 0x3f, 0xa7, - 0x61, 0x35, 0xa2, 0x1d, 0x72, 0xd3, 0xef, 0xc7, 0x1a, 0xf4, 0x66, 0xa2, 0x41, 0x57, 0xb3, 0x6c, - 0x9e, 0x1b, 0x39, 0xcd, 0x26, 0x8c, 0x33, 0xff, 0x8f, 0x84, 0xf1, 0x0f, 0x0a, 0x2c, 0x47, 0xee, - 0xee, 0x0a, 0x18, 0xe3, 0x83, 0x38, 0x63, 0xbc, 0x59, 0x24, 0x69, 0x72, 0x28, 0xe3, 0xbf, 0x14, - 0x68, 0x46, 0xb4, 0x8e, 0xa8, 0xe3, 0x6a, 0x2e, 0xa3, 0x26, 0xfb, 0xd8, 0xd2, 0xfb, 0x06, 0xdd, - 0xd5, 0x89, 0x66, 0x60, 0xca, 0x17, 0x34, 0xcb, 0x3c, 0xb2, 0x74, 0xad, 0x33, 0x44, 0x04, 0x16, - 0x3f, 0x3f, 0xa3, 0xe6, 0x1e, 0xd5, 0x29, 0xa3, 0x5d, 0x99, 0x4e, 0x3f, 0x90, 0xf0, 0x8b, 0x0f, - 0x43, 0xd1, 0xb3, 0x51, 0x7d, 0xb3, 0x08, 0xa2, 0xc8, 0xb2, 0x28, 0x26, 0xfa, 0x39, 0x00, 0xff, - 0x14, 0xfd, 0xa8, 0x2b, 0x13, 0xee, 0xae, 0x5f, 0x95, 0x0f, 0x03, 0xc9, 0x44, 0x0e, 0x22, 0x88, - 0x8d, 0xbf, 0xcd, 0xc7, 0x62, 0xf6, 0x8d, 0xe7, 0x6e, 0xbf, 0x80, 0xd5, 0x41, 0x78, 0x3b, 0xbe, - 0x02, 0x9f, 0x75, 0x67, 0x92, 0xff, 0x87, 0x05, 0xf0, 0x59, 0xf7, 0xda, 0xfa, 0xb6, 0x74, 0xb2, - 0xfa, 0x71, 0x06, 0x1c, 0xce, 0x74, 0x82, 0xbe, 0x07, 0x8b, 0x9c, 0x27, 0x68, 0x1d, 0xfa, 0x21, - 0x31, 0xfc, 0x7a, 0x5a, 0xf1, 0xf3, 0xe5, 0x38, 0x14, 0xe1, 0xa8, 0x1e, 0x3a, 0x83, 0x15, 0xdb, - 0xea, 0x1e, 0x10, 0x93, 0xf4, 0x28, 0x9f, 0xae, 0xbc, 0x50, 0x0a, 0x42, 0xb7, 0xd0, 0x7a, 0xc7, - 0x9f, 0xa9, 0x8f, 0xd2, 0x2a, 0xcf, 0x38, 0x33, 0x4a, 0x2f, 0x8b, 0x24, 0xc8, 0x82, 0x44, 0x0e, - 0x54, 0xfa, 0x72, 0xca, 0x91, 0xfc, 0xd6, 0xfb, 0xe7, 0x6a, 0xbb, 0x48, 0x61, 0x9d, 0xc6, 0x2c, - 0xc3, 0x47, 0x2f, 0xbe, 0x8e, 0x13, 0x1e, 0x72, 0xf9, 0x6a, 0xf9, 0x7f, 0xe2, 0xab, 0x19, 0x04, - 0x7a, 0x61, 0x42, 0x02, 0xfd, 0x67, 0x05, 0x6e, 0xda, 0x05, 0x6a, 0xa9, 0x0a, 0xe2, 0x6e, 0xee, - 0x17, 0xb9, 0x9b, 0x22, 0xb5, 0xd9, 0xda, 0x1c, 0x8f, 0xea, 0x37, 0x8b, 0x68, 0xe2, 0x42, 0xfb, - 0x6b, 0xfc, 0xae, 0x04, 0xd7, 0x53, 0xaf, 0x25, 0xfa, 0xd1, 0x05, 0x24, 0xf7, 0xc6, 0x73, 0x23, - 0xb8, 0x29, 0x76, 0x3a, 0x33, 0x01, 0x3b, 0xdd, 0x81, 0xe5, 0x4e, 0xdf, 0x71, 0xa8, 0xc9, 0x12, - 0xdc, 0x34, 0x08, 0xea, 0x6e, 0x5c, 0x8c, 0x93, 0xfa, 0x59, 0x04, 0xbb, 0x34, 0x21, 0xc1, 0x8e, - 0xee, 0x42, 0x92, 0x24, 0xaf, 0x04, 0xd3, 0xbb, 0x90, 0x5c, 0x29, 0xa9, 0xcf, 0x07, 0x44, 0x0f, - 0x35, 0x40, 0x98, 0x8f, 0x0f, 0x88, 0xa7, 0x31, 0x29, 0x4e, 0x68, 0x67, 0x90, 0xd5, 0x85, 0xa2, - 0x64, 0x15, 0x91, 0x18, 0x95, 0x06, 0xd1, 0xef, 0xb6, 0x8a, 0xe4, 0x6e, 0x71, 0x2e, 0x9d, 0xf9, - 0x2f, 0xc2, 0xe2, 0xe4, 0xff, 0x22, 0x34, 0xfe, 0xa2, 0xc0, 0x8b, 0xb9, 0x9d, 0x05, 0xed, 0xc4, - 0xc6, 0xb7, 0xad, 0xc4, 0xf8, 0xf6, 0x9d, 0x5c, 0xc3, 0xc8, 0x0c, 0xe7, 0x64, 0xd3, 0xec, 0xf7, - 0x8a, 0xd1, 0xec, 0x0c, 0x0e, 0x78, 0x39, 0xdf, 0x6e, 0x6d, 0x3d, 0x79, 0x5a, 0x9b, 0xfa, 0xf2, - 0x69, 0x6d, 0xea, 0xab, 0xa7, 0xb5, 0xa9, 0x5f, 0x8e, 0x6b, 0xca, 0x93, 0x71, 0x4d, 0xf9, 0x72, - 0x5c, 0x53, 0xbe, 0x1a, 0xd7, 0x94, 0xbf, 0x8f, 0x6b, 0xca, 0xaf, 0xbf, 0xae, 0x4d, 0x7d, 0x32, - 0x2f, 0x3d, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x3d, 0xfc, 0xe0, 0xc3, 0xad, 0x1d, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2747f709ac7c95e7) +} + +var fileDescriptor_2747f709ac7c95e7 = []byte{ + // 2018 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xf7, 0x52, 0xa2, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x25, 0x61, 0x63, + 0xc4, 0x4a, 0x62, 0x2f, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x75, 0x21, 0x4a, 0x6e, 0xec, 0x40, 0x8a, + 0x94, 0x91, 0x64, 0xa3, 0xe9, 0x07, 0x32, 0x22, 0xc7, 0xd4, 
0x46, 0xfb, 0x85, 0xdd, 0x21, 0x63, + 0xa2, 0x97, 0xfe, 0x01, 0x05, 0xd2, 0x73, 0xff, 0x8a, 0xf6, 0xd4, 0xa2, 0x45, 0x2f, 0x3d, 0x14, + 0x3e, 0x06, 0xbd, 0x34, 0x27, 0xa2, 0x66, 0xae, 0xed, 0xad, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, + 0xec, 0xf7, 0xae, 0xb4, 0x2c, 0x60, 0x01, 0xcd, 0x8d, 0x3b, 0xef, 0xbd, 0xdf, 0x7b, 0xf3, 0xe6, + 0xbd, 0x37, 0xef, 0x0d, 0xe1, 0xfa, 0xe9, 0x7b, 0x9e, 0xa6, 0xdb, 0x4d, 0xe2, 0xe8, 0x4d, 0xe2, + 0x38, 0x5e, 0xb3, 0x7f, 0xeb, 0x98, 0x32, 0x72, 0xab, 0xd9, 0xa5, 0x16, 0x75, 0x09, 0xa3, 0x1d, + 0xcd, 0x71, 0x6d, 0x66, 0xa3, 0x25, 0x9f, 0x51, 0x23, 0x8e, 0xae, 0x71, 0x46, 0x4d, 0x32, 0x2e, + 0xdf, 0xec, 0xea, 0xec, 0xa4, 0x77, 0xac, 0xb5, 0x6d, 0xb3, 0xd9, 0xb5, 0xbb, 0x76, 0x53, 0xf0, + 0x1f, 0xf7, 0x1e, 0x8b, 0x2f, 0xf1, 0x21, 0x7e, 0xf9, 0x38, 0xcb, 0x6a, 0x4c, 0x61, 0xdb, 0x76, + 0x69, 0xb3, 0x9f, 0xd1, 0xb5, 0xfc, 0x4e, 0xc4, 0x63, 0x92, 0xf6, 0x89, 0x6e, 0x51, 0x77, 0xd0, + 0x74, 0x4e, 0xbb, 0x7c, 0xc1, 0x6b, 0x9a, 0x94, 0x91, 0x3c, 0xa9, 0x66, 0x91, 0x94, 0xdb, 0xb3, + 0x98, 0x6e, 0xd2, 0x8c, 0xc0, 0xbb, 0xe7, 0x09, 0x78, 0xed, 0x13, 0x6a, 0x92, 0x8c, 0xdc, 0xdb, + 0x45, 0x72, 0x3d, 0xa6, 0x1b, 0x4d, 0xdd, 0x62, 0x1e, 0x73, 0xd3, 0x42, 0xea, 0xbf, 0x15, 0x40, + 0x5b, 0xb6, 0xc5, 0x5c, 0xdb, 0x30, 0xa8, 0x8b, 0x69, 0x5f, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42, + 0x8d, 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0x6b, 0xca, 0xfa, 0xec, 0xc6, 0x5b, 0x5a, 0xe4, 0xe9, + 0x10, 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0xd6, 0xbf, 0xa5, 0xed, 0x1d, 0x7f, + 0x46, 0xdb, 0x6c, 0x97, 0x32, 0xd2, 0x42, 0x4f, 0x87, 0xab, 0x97, 0x46, 0xc3, 0x55, 0x88, 0xd6, + 0x70, 0x88, 0x8a, 0xf6, 0x60, 0x52, 0xa0, 0x57, 0x04, 0xfa, 0xcd, 0x42, 0x74, 0xb9, 0x69, 0x0d, + 0x93, 0xcf, 0xef, 0x3d, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x93, 0xdb, 0x84, 0x11, + 0x2c, 0x80, 0xd0, 0x0d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xc4, 0x9a, 0xb2, 0x3e, 0xd1, 0xba, 0x2c, + 0xb9, 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x6a, 0x76, 0xdf, 0x3b, 0xba, 0xc7, + 0xd0, 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8, + 0xbe, 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xda, 0xc4, 0xfa, 0xec, 0xc6, 0x9b, 0x5a, + 0x41, 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0x73, 0x12, 0xb7, 0xfa, 0x80, 0x23, 0x60, 0x1f, 0x48, 0xfd, + 0x65, 0x05, 0x60, 0x9b, 0x3a, 0x86, 0x3d, 0x30, 0xa9, 0xc5, 0x2e, 0xe0, 0xe8, 0x1e, 0xc0, 0xa4, + 0xe7, 0xd0, 0xb6, 0x3c, 0xba, 0xeb, 0x85, 0x3b, 0x88, 0x8c, 0x3a, 0x70, 0x68, 0x3b, 0x3a, 0x34, + 0xfe, 0x85, 0x05, 0x04, 0xfa, 0x18, 0xa6, 0x3c, 0x46, 0x58, 0xcf, 0x13, 0x47, 0x36, 0xbb, 0xf1, + 0x7a, 0x19, 0x30, 0x21, 0xd0, 0xaa, 0x4b, 0xb8, 0x29, 0xff, 0x1b, 0x4b, 0x20, 0xf5, 0x6f, 0x13, + 0xb0, 0x10, 0x31, 0x6f, 0xd9, 0x56, 0x47, 0x67, 0x3c, 0xa4, 0xef, 0xc0, 0x24, 0x1b, 0x38, 0x54, + 0xf8, 0x64, 0xa6, 0x75, 0x3d, 0x30, 0xe6, 0x70, 0xe0, 0xd0, 0xe7, 0xc3, 0xd5, 0xa5, 0x1c, 0x11, + 0x4e, 0xc2, 0x42, 0x08, 0xed, 0x84, 0x76, 0x56, 0x84, 0xf8, 0x3b, 0x49, 0xe5, 0xcf, 0x87, 0xab, + 0x39, 0x05, 0x44, 0x0b, 0x91, 0x92, 0x26, 0xa2, 0xcf, 0xa0, 0x6e, 0x10, 0x8f, 0x1d, 0x39, 0x1d, + 0xc2, 0xe8, 0xa1, 0x6e, 0xd2, 0xc6, 0x94, 0xd8, 0xfd, 0x1b, 0xe5, 0x0e, 0x8a, 0x4b, 0xb4, 0xae, + 0x4a, 0x0b, 0xea, 0x3b, 0x09, 0x24, 0x9c, 0x42, 0x46, 0x7d, 0x40, 0x7c, 0xe5, 0xd0, 0x25, 0x96, + 0xe7, 0xef, 0x8a, 0xeb, 0x9b, 0x1e, 0x5b, 0xdf, 0xb2, 0xd4, 0x87, 0x76, 0x32, 0x68, 0x38, 0x47, + 0x03, 0x7a, 0x0d, 0xa6, 0x5c, 0x4a, 0x3c, 0xdb, 0x6a, 0x4c, 0x0a, 0x8f, 0x85, 0xc7, 0x85, 0xc5, + 0x2a, 0x96, 0x54, 0xf4, 0x3a, 0x4c, 0x9b, 0xd4, 0xf3, 0x48, 0x97, 0x36, 0xaa, 0x82, 
0x71, 0x5e, + 0x32, 0x4e, 0xef, 0xfa, 0xcb, 0x38, 0xa0, 0xab, 0xbf, 0x57, 0xa0, 0x1e, 0x1d, 0xd3, 0x05, 0xe4, + 0xea, 0xfd, 0x64, 0xae, 0xbe, 0x5a, 0x22, 0x38, 0x0b, 0x72, 0xf4, 0x1f, 0x15, 0x40, 0x11, 0x13, + 0xb6, 0x0d, 0xe3, 0x98, 0xb4, 0x4f, 0xd1, 0x1a, 0x4c, 0x5a, 0xc4, 0x0c, 0x62, 0x32, 0x4c, 0x90, + 0x8f, 0x88, 0x49, 0xb1, 0xa0, 0xa0, 0x2f, 0x14, 0x40, 0x3d, 0x71, 0x9a, 0x9d, 0x4d, 0xcb, 0xb2, + 0x19, 0xe1, 0x0e, 0x0e, 0x0c, 0xda, 0x2a, 0x61, 0x50, 0xa0, 0x4b, 0x3b, 0xca, 0xa0, 0xdc, 0xb3, + 0x98, 0x3b, 0x88, 0x0e, 0x36, 0xcb, 0x80, 0x73, 0x54, 0xa3, 0x1f, 0x03, 0xb8, 0x12, 0xf3, 0xd0, + 0x96, 0x69, 0x5b, 0x5c, 0x03, 0x02, 0xf5, 0x5b, 0xb6, 0xf5, 0x58, 0xef, 0x46, 0x85, 0x05, 0x87, + 0x10, 0x38, 0x06, 0xb7, 0x7c, 0x0f, 0x96, 0x0a, 0xec, 0x44, 0x97, 0x61, 0xe2, 0x94, 0x0e, 0x7c, + 0x57, 0x61, 0xfe, 0x13, 0x2d, 0x42, 0xb5, 0x4f, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0xb7, + 0x2b, 0xef, 0x29, 0xea, 0x6f, 0xaa, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf3, 0xeb, 0xc1, 0x31, + 0xf4, 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f, + 0x42, 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9, + 0x71, 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06, + 0x61, 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44, + 0xf5, 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0xee, + 0x40, 0x64, 0xdb, 0x59, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38, + 0xb4, 0x09, 0xf3, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0x83, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2, + 0xb4, 0xda, 0x5a, 0x92, 0x42, 0xf3, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x0c, 0xae, + 0xdd, 0xfb, 0xba, 0xc7, 0x6c, 0x77, 0xb0, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x8c, + 0x86, 0xab, 0x8b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51, + 0xc3, 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0xc6, 0x0b, + 0xd3, 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x72, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa6, 0xa4, + 0x63, 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x23, 0x76, 0xf4, 0xca, 0x68, 0xb8, 0xba, 0xb4, 0x9f, 0xcf, + 0x82, 0x8b, 0x64, 0xd5, 0x3f, 0x4f, 0xc2, 0xe5, 0xf4, 0x1d, 0x87, 0x3e, 0x04, 0x64, 0x1f, 0x7b, + 0xd4, 0xed, 0xd3, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee, + 0x65, 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26, + 0x09, 0x36, 0x61, 0x5e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7, + 0xf9, 0xd1, 0x1d, 0x98, 0x73, 0x79, 0x1c, 0x84, 0x00, 0xd3, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x0e, + 0xc7, 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0x2b, 0xa4, 0x4f, 0x74, 0x83, 0x1c, 0x1b, 0x34, 0x04, + 0x98, 0x14, 0x00, 0x2f, 0x4b, 0x80, 0x2b, 0x9b, 0x69, 0x06, 0x9c, 0x95, 0x41, 0xbb, 0xb0, 0xd0, + 0xb3, 0xb2, 0x50, 0x7e, 0x10, 0xbf, 0x22, 0xa1, 0x16, 0x8e, 0xb2, 0x2c, 0x38, 0x4f, 0x0e, 0x7d, + 0x0a, 0xd0, 0x0e, 0x6e, 0x75, 0xaf, 0x31, 0x25, 0xca, 0xf0, 0x8d, 0x12, 0xc9, 0x16, 0xb6, 0x02, + 0x51, 0x09, 0x0c, 0x97, 0x3c, 0x1c, 0xc3, 0x44, 0xb7, 0xa1, 0xde, 0xb6, 0x0d, 0x43, 0x44, 0xfe, + 0x96, 0xdd, 0xb3, 0x98, 0x08, 0xde, 0x6a, 0x0b, 0xf1, 0xcb, 0x7e, 0x2b, 0x41, 0xc1, 0x29, 0x4e, + 0xf5, 0x8f, 0x4a, 0xfc, 0x9a, 0x09, 0xd2, 0x19, 0xdd, 0x4e, 0xb4, 0x3e, 0xaf, 0xa5, 0x5a, 0x9f, + 0xab, 0x59, 0x89, 0x58, 0xe7, 0xa3, 0xc3, 0x1c, 0x0f, 0x7e, 0xdd, 0xea, 0xfa, 0x07, 0x2e, 0x4b, + 0xe2, 0x5b, 
0x67, 0xa6, 0x52, 0xc8, 0x1d, 0xbb, 0x18, 0xaf, 0x88, 0x33, 0x8f, 0x13, 0x71, 0x12, + 0x59, 0xbd, 0x0b, 0xf5, 0x64, 0x1e, 0x26, 0x7a, 0x7a, 0xe5, 0xdc, 0x9e, 0xfe, 0x6b, 0x05, 0x96, + 0x0a, 0xb4, 0x23, 0x03, 0xea, 0x26, 0x79, 0x12, 0x3b, 0xe6, 0x73, 0x7b, 0x63, 0x3e, 0x35, 0x69, + 0xfe, 0xd4, 0xa4, 0x3d, 0xb0, 0xd8, 0x9e, 0x7b, 0xc0, 0x5c, 0xdd, 0xea, 0xfa, 0xe7, 0xb0, 0x9b, + 0xc0, 0xc2, 0x29, 0x6c, 0xf4, 0x09, 0xd4, 0x4c, 0xf2, 0xe4, 0xa0, 0xe7, 0x76, 0xf3, 0xfc, 0x55, + 0x4e, 0x8f, 0xb8, 0x3f, 0x76, 0x25, 0x0a, 0x0e, 0xf1, 0xd4, 0x3f, 0x29, 0xb0, 0x96, 0xd8, 0x25, + 0xaf, 0x15, 0xf4, 0x71, 0xcf, 0x38, 0xa0, 0xd1, 0x89, 0xbf, 0x09, 0x33, 0x0e, 0x71, 0x99, 0x1e, + 0xd6, 0x8b, 0x6a, 0x6b, 0x6e, 0x34, 0x5c, 0x9d, 0xd9, 0x0f, 0x16, 0x71, 0x44, 0xcf, 0xf1, 0x4d, + 0xe5, 0xc5, 0xf9, 0x46, 0xfd, 0x8f, 0x02, 0xd5, 0x83, 0x36, 0x31, 0xe8, 0x05, 0x4c, 0x2a, 0xdb, + 0x89, 0x49, 0x45, 0x2d, 0x8c, 0x59, 0x61, 0x4f, 0xe1, 0x90, 0xb2, 0x93, 0x1a, 0x52, 0xae, 0x9d, + 0x83, 0x73, 0xf6, 0x7c, 0xf2, 0x3e, 0xcc, 0x84, 0xea, 0x12, 0x45, 0x59, 0x39, 0xaf, 0x28, 0xab, + 0xbf, 0xae, 0xc0, 0x6c, 0x4c, 0xc5, 0x78, 0xd2, 0xdc, 0xdd, 0xb1, 0xbe, 0x86, 0x17, 0xae, 0x8d, + 0x32, 0x1b, 0xd1, 0x82, 0x1e, 0xc6, 0x6f, 0x17, 0xa3, 0x66, 0x21, 0xdb, 0xda, 0xdc, 0x85, 0x3a, + 0x23, 0x6e, 0x97, 0xb2, 0x80, 0x26, 0x1c, 0x36, 0x13, 0xcd, 0x2a, 0x87, 0x09, 0x2a, 0x4e, 0x71, + 0x2f, 0xdf, 0x81, 0xb9, 0x84, 0xb2, 0xb1, 0x7a, 0xbe, 0x2f, 0xb8, 0x73, 0xa2, 0x54, 0xb8, 0x80, + 0xe8, 0xfa, 0x30, 0x11, 0x5d, 0xeb, 0xc5, 0xce, 0x8c, 0x25, 0x68, 0x51, 0x8c, 0xe1, 0x54, 0x8c, + 0xbd, 0x51, 0x0a, 0xed, 0xec, 0x48, 0xfb, 0x67, 0x05, 0x16, 0x63, 0xdc, 0xd1, 0x28, 0xfc, 0xbd, + 0xc4, 0x7d, 0xb0, 0x9e, 0xba, 0x0f, 0x1a, 0x79, 0x32, 0x2f, 0x6c, 0x16, 0xce, 0x9f, 0x4f, 0x27, + 0xfe, 0x1f, 0xe7, 0xd3, 0x3f, 0x28, 0x30, 0x1f, 0xf3, 0xdd, 0x05, 0x0c, 0xa8, 0x0f, 0x92, 0x03, + 0xea, 0xb5, 0x32, 0x41, 0x53, 0x30, 0xa1, 0xde, 0x86, 0x85, 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d, + 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8, 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88, + 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea, 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e, + 0xda, 0x46, 0xcf, 0xa4, 0x5b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9, 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1, + 0xb7, 0x07, 0x88, 0xc0, 0xec, 0xe7, 0x27, 0xd4, 0xda, 0xa6, 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1, + 0x07, 0x12, 0x7e, 0xf6, 0x51, 0x44, 0x7a, 0x3e, 0x5c, 0x5d, 0x2f, 0x83, 0x28, 0x22, 0x34, 0x8e, + 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60, 0xbd, 0x1b, 0x64, 0xf4, 0xa3, 0x90, + 0x32, 0x96, 0x82, 0x18, 0xa2, 0xfa, 0xdb, 0x5a, 0xe2, 0xbc, 0xbf, 0xf1, 0x63, 0xe6, 0xcf, 0x61, + 0xb1, 0x1f, 0x79, 0x27, 0x60, 0xe0, 0x6d, 0xf9, 0x44, 0xfa, 0xe9, 0x2e, 0x84, 0xcf, 0xf3, 0x6b, + 0xeb, 0xdb, 0x52, 0xc9, 0xe2, 0xc3, 0x1c, 0x38, 0x9c, 0xab, 0x04, 0x7d, 0x17, 0x66, 0xf9, 0x48, + 0xa3, 0xb7, 0xe9, 0x47, 0xc4, 0x0c, 0x72, 0x71, 0x21, 0x88, 0x97, 0x83, 0x88, 0x84, 0xe3, 0x7c, + 0xe8, 0x04, 0x16, 0x1c, 0xbb, 0xb3, 0x4b, 0x2c, 0xd2, 0xa5, 0xbc, 0x11, 0xf4, 0x8f, 0x52, 0xcc, + 0x9e, 0x33, 0xad, 0x77, 0x83, 0xf6, 0x7f, 0x3f, 0xcb, 0xf2, 0x9c, 0x0f, 0x71, 0xd9, 0x65, 0x11, + 0x04, 0x79, 0x90, 0xc8, 0x85, 0x7a, 0x4f, 0xf6, 0x63, 0x72, 0x14, 0xf7, 0x1f, 0xd9, 0x36, 0xca, + 0x24, 0xe5, 0x51, 0x42, 0x32, 0xba, 0x30, 0x93, 0xeb, 0x38, 0xa5, 0xa1, 0x70, 0xb4, 0xae, 0xfd, + 0x4f, 0xa3, 0x75, 0xce, 0xac, 0x3f, 0x33, 0xe6, 0xac, 0xff, 0x17, 0x05, 0xae, 0x39, 0x25, 0x72, + 0xa9, 0x01, 0xc2, 0x37, 0xf7, 0xcb, 0xf8, 0xa6, 0x4c, 0x6e, 0xb6, 0xd6, 0x47, 0xc3, 0xd5, 0x6b, + 0x65, 0x38, 0x71, 0x29, 0xfb, 0xd0, 
0x43, 0xa8, 0xd9, 0xb2, 0x06, 0x36, 0x66, 0x85, 0xad, 0x37, + 0xca, 0xd8, 0x1a, 0xd4, 0x4d, 0x3f, 0x2d, 0x83, 0x2f, 0x1c, 0x62, 0xa9, 0xbf, 0xab, 0xc2, 0x95, + 0xcc, 0x0d, 0x8e, 0x7e, 0x78, 0xc6, 0x9c, 0x7f, 0xf5, 0x85, 0xcd, 0xf8, 0x99, 0x01, 0x7d, 0x62, + 0x8c, 0x01, 0x7d, 0x13, 0xe6, 0xdb, 0x3d, 0xd7, 0xa5, 0x16, 0x4b, 0x8d, 0xe7, 0x61, 0xb0, 0x6c, + 0x25, 0xc9, 0x38, 0xcd, 0x9f, 0xf7, 0xc6, 0x50, 0x1d, 0xf3, 0x8d, 0x21, 0x6e, 0x85, 0x9c, 0x13, + 0xfd, 0xd4, 0xce, 0x5a, 0x21, 0xc7, 0xc5, 0x34, 0x3f, 0x6f, 0x5a, 0x7d, 0xd4, 0x10, 0x61, 0x3a, + 0xd9, 0xb4, 0x1e, 0x25, 0xa8, 0x38, 0xc5, 0x9d, 0x33, 0xaf, 0xcf, 0x94, 0x9d, 0xd7, 0x11, 0x49, + 0xbc, 0x26, 0x80, 0xa8, 0xa3, 0x37, 0xcb, 0xc4, 0x59, 0xf9, 0xe7, 0x84, 0xdc, 0x87, 0x94, 0xd9, + 0xf1, 0x1f, 0x52, 0xd4, 0xbf, 0x2a, 0xf0, 0x72, 0x61, 0xc5, 0x42, 0x9b, 0x89, 0x96, 0xf2, 0x66, + 0xaa, 0xa5, 0xfc, 0x4e, 0xa1, 0x60, 0xac, 0xaf, 0x74, 0xf3, 0x5f, 0x1a, 0xde, 0x2f, 0xf7, 0xd2, + 0x90, 0x33, 0x05, 0x9f, 0xff, 0xe4, 0xd0, 0xfa, 0xfe, 0xd3, 0x67, 0x2b, 0x97, 0xbe, 0x7c, 0xb6, + 0x72, 0xe9, 0xab, 0x67, 0x2b, 0x97, 0x7e, 0x31, 0x5a, 0x51, 0x9e, 0x8e, 0x56, 0x94, 0x2f, 0x47, + 0x2b, 0xca, 0x57, 0xa3, 0x15, 0xe5, 0xef, 0xa3, 0x15, 0xe5, 0x57, 0x5f, 0xaf, 0x5c, 0xfa, 0x64, + 0xa9, 0xe0, 0xdf, 0xe8, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xc9, 0xe6, 0x8c, 0xa7, 0x1e, + 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -1429,6 +1462,18 @@ func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (in _ = i var l int _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.Partition != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) i-- @@ -1724,6 +1769,32 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetOrdinals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetOrdinals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetOrdinals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1777,6 +1848,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ordinals != nil { + { + size, err := m.Ordinals.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.PersistentVolumeClaimRetentionPolicy != nil { { size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) @@ -2196,6 +2279,10 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { if m.Partition != nil { n += 1 + sovGenerated(uint64(*m.Partition)) } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2295,6 +2382,16 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m 
*StatefulSetOrdinals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Start)) + return n +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { if m == nil { return 0 @@ -2343,6 +2440,10 @@ func (m *StatefulSetSpec) Size() (n int) { l = m.PersistentVolumeClaimRetentionPolicy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Ordinals != nil { + l = m.Ordinals.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2568,6 +2669,7 @@ func (this *RollingUpdateStatefulSetStrategy) String() string { } s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2658,6 +2760,16 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetOrdinals) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetOrdinals{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { if this == nil { return "nil" @@ -2689,6 +2801,7 @@ func (this *StatefulSetSpec) String() string { `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, + `Ordinals:` + strings.Replace(this.Ordinals.String(), "StatefulSetOrdinals", "StatefulSetOrdinals", 1) + `,`, `}`, }, "") return s @@ -4603,6 +4716,42 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } } m.Partition = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5547,6 +5696,75 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetOrdinals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetOrdinals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetOrdinals: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5985,6 +6203,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordinals", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ordinals == nil { + m.Ordinals = &StatefulSetOrdinals{} + } + if err := m.Ordinals.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 2027523a6..4b0fa366c 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/apps/v1beta1"; // DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the // release notes for more information. @@ -45,12 +45,12 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + // data is the serialized representation of the state. + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. 
optional int64 revision = 3; } @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -70,7 +70,7 @@ message ControllerRevisionList { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -90,10 +90,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -106,7 +106,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -128,46 +128,47 @@ message DeploymentRollback { // DeploymentSpec is the specification of the desired behavior of the Deployment. message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. // +optional optional int32 replicas = 1; - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + // The only allowed template.spec.restartPolicy value is "Always". + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional optional int32 minReadySeconds = 5; - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. 
// +optional optional int32 revisionHistoryLimit = 6; - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional optional bool paused = 7; // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional optional RollbackConfig rollbackTo = 8; - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -178,15 +179,15 @@ message DeploymentSpec { // DeploymentStatus is the most recently observed status of the Deployment. message DeploymentStatus { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional optional int64 observedGeneration = 1; - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional optional int32 replicas = 2; - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional optional int32 updatedReplicas = 3; @@ -198,18 +199,20 @@ message DeploymentStatus { // +optional optional int32 availableReplicas = 4; - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional optional int32 unavailableReplicas = 5; - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. // +optional @@ -251,7 +254,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. 
@@ -265,48 +268,60 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. message RollingUpdateStatefulSetStrategy { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. + // Partition indicates the ordinal at which the StatefulSet should be partitioned + // for updates. During a rolling update, all pods from ordinal Replicas-1 to + // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. + // This is helpful in being able to do a canary based deployment. The default value is 0. optional int32 partition = 1; + + // maxUnavailable is the maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding up. This can not be 0. + // Defaults to 1. This field is alpha-level and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to + // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it + // will be counted towards MaxUnavailable. + // +optional + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional map selector = 2; - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -327,7 +342,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -349,7 +364,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -363,21 +378,36 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +message StatefulSetOrdinals { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + optional int32 start = 1; +} + // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. optional string whenDeleted = 1; - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -399,13 +429,15 @@ message StatefulSetSpec { // If empty, defaulted to labels on the pod template. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -415,7 +447,8 @@ message StatefulSetSpec { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + // +listType=atomic + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -446,10 +479,9 @@ message StatefulSetSpec { // StatefulSetSpec version. The default value is 10. optional int32 revisionHistoryLimit = 8; - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; @@ -458,6 +490,12 @@ message StatefulSetSpec { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. + // +optional + optional StatefulSetOrdinals ordinals = 11; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -495,14 +533,15 @@ message StatefulSetStatus { // +optional optional int32 collisionCount = 9; - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated StatefulSetCondition conditions = 10; - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. 
// +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 19389d8b9..07bfa88c5 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -31,21 +31,21 @@ const ( // ScaleSpec describes the attributes of a scale subresource type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the number of observed instances of the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replias is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - // label selector for pods that should match the replicas count. This is a serializated + // targetSelector is the label selector for pods that should match the replicas count. This is a serializated // version of both map-based and more expressive set-based selectors. This is done to // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this @@ -68,11 +68,11 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -154,9 +154,20 @@ const ( // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. type RollingUpdateStatefulSetStrategy struct { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. + // Partition indicates the ordinal at which the StatefulSet should be partitioned + // for updates. During a rolling update, all pods from ordinal Replicas-1 to + // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. + // This is helpful in being able to do a canary based deployment. The default value is 0. Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` + // maxUnavailable is the maximum number of pods that can be unavailable during the update. 
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding up. This can not be 0. + // Defaults to 1. This field is alpha-level and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to + // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it + // will be counted towards MaxUnavailable. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"` } // PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine @@ -180,12 +191,12 @@ const ( // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. type StatefulSetPersistentVolumeClaimRetentionPolicy struct { - // WhenDeleted specifies what happens to PVCs created from StatefulSet + // whenDeleted specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is deleted. The default policy // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The // `Delete` policy causes those PVCs to be deleted. WhenDeleted PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty" protobuf:"bytes,1,opt,name=whenDeleted,casttype=PersistentVolumeClaimRetentionPolicyType"` - // WhenScaled specifies what happens to PVCs created from StatefulSet + // whenScaled specifies what happens to PVCs created from StatefulSet // VolumeClaimTemplates when the StatefulSet is scaled down. The default // policy of `Retain` causes PVCs to not be affected by a scaledown. The // `Delete` policy causes the associated PVCs for any excess pods above @@ -193,6 +204,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct { WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +type StatefulSetOrdinals struct { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + Start int32 `json:"start" protobuf:"varint,1,opt,name=start"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -212,7 +238,9 @@ type StatefulSetSpec struct { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". 
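Illustrative sketch only, not part of the vendored diff: a minimal program showing how client code built against this updated k8s.io/api might populate the new apps/v1beta1 fields introduced above (maxUnavailable on RollingUpdateStatefulSetStrategy and the new StatefulSetOrdinals type). The package layout, the int32Ptr helper, and the concrete values are assumptions for the example; field names and feature-gate caveats come from the field comments in the diff.

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	// Per the field comment, maxUnavailable is alpha-level and only honored by
	// servers that enable the MaxUnavailableStatefulSet feature.
	maxUnavailable := intstr.FromString("25%")
	partition := int32(2)

	spec := appsv1beta1.StatefulSetSpec{
		Replicas: int32Ptr(5),
		UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{
			Type: appsv1beta1.RollingUpdateStatefulSetStrategyType,
			RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{
				Partition:      &partition,
				MaxUnavailable: &maxUnavailable,
			},
		},
		// New StatefulSetOrdinals type: number replicas starting at 1 instead of 0.
		Ordinals: &appsv1beta1.StatefulSetOrdinals{Start: 1},
	}

	// With ordinals.start=1 and 5 replicas, pod indices run 1..5.
	fmt.Printf("pod indices: %d..%d\n", spec.Ordinals.Start, spec.Ordinals.Start+*spec.Replicas-1)
}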
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -223,6 +251,7 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional + // +listType=atomic VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` // serviceName is the name of the service that governs this StatefulSet. @@ -254,10 +283,9 @@ type StatefulSetSpec struct { // StatefulSetSpec version. The default value is 10. RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` @@ -266,6 +294,12 @@ type StatefulSetSpec struct { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. + // +optional + Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -303,14 +337,15 @@ type StatefulSetStatus struct { // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - // Represents the latest available observations of a statefulset's current state. + // conditions represent the latest available observations of a statefulset's current state. // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` - // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. + // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -375,17 +410,18 @@ type Deployment struct { // DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit + // replicas is the number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. 
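Companion sketch for the persistentVolumeClaimRetentionPolicy field added to StatefulSetSpec above, using the same appsv1beta1 alias as the previous example; the withPVCRetention helper name is invented for illustration. The `Retain`/`Delete` string values and the alpha StatefulSetAutoDeletePVC feature-gate requirement are taken from the field comments in the diff.

func withPVCRetention(spec *appsv1beta1.StatefulSetSpec) {
	spec.PersistentVolumeClaimRetentionPolicy = &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
		// Allowed values per the comments are `Retain` (the default) and `Delete`.
		WhenDeleted: appsv1beta1.PersistentVolumeClaimRetentionPolicyType("Delete"),
		WhenScaled:  appsv1beta1.PersistentVolumeClaimRetentionPolicyType("Retain"),
	}
}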
// +optional Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - // Label selector for pods. Existing ReplicaSets whose pods are + // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -393,28 +429,28 @@ type DeploymentSpec struct { // +patchStrategy=retainKeys Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - // Minimum number of seconds for which a newly created pod should be ready + // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - // The number of old ReplicaSets to retain to allow rollback. + // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. // Defaults to 2. // +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - // Indicates that the deployment is paused. + // paused indicates that the deployment is paused. // +optional Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. + // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done. // +optional RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` - // The maximum time in seconds for a deployment to make progress before it + // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it // is considered to be failed. The deployment controller will continue to // process failed deployments and a condition with a ProgressDeadlineExceeded // reason will be surfaced in the deployment status. Note that progress will @@ -513,15 +549,15 @@ type RollingUpdateDeployment struct { // DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { - // The generation observed by the deployment controller. + // observedGeneration is the generation observed by the deployment controller. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector). // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. 
+ // updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec. // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` @@ -533,18 +569,20 @@ type DeploymentStatus struct { // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - // Total number of unavailable pods targeted by this deployment. This is the total number of + // unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of // pods that are still required for the deployment to have 100% available capacity. They may // either be pods that are running but not yet available or pods that still have not been created. // +optional UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - // Represents the latest available observations of a deployment's current state. + // Conditions represent the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - // Count of hash collisions for the Deployment. The Deployment controller uses this + // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this // field as a collision avoidance mechanism when it needs to create the name for the // newest ReplicaSet. // +optional @@ -626,10 +664,10 @@ type ControllerRevision struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Data is the serialized representation of the state. + // data is the serialized representation of the state. Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - // Revision indicates the revision of the state represented by Data. + // revision indicates the revision of the state represented by Data. Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 5421116e1..9e7fb1adc 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. 
Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", + "data": "data is the serialized representation of the state.", + "revision": "revision indicates the revision of the state represented by Data.", } func (ControllerRevision) SwaggerDoc() map[string]string { @@ -96,15 +96,15 @@ func (DeploymentRollback) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", + "replicas": "replicas is the number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "selector is the label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", - "paused": "Indicates that the deployment is paused.", - "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "paused indicates that the deployment is paused.", + "rollbackTo": "DEPRECATED. rollbackTo is the config this deployment is rolling back to. 
Will be cleared after rollback is done.", + "progressDeadlineSeconds": "progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", } func (DeploymentSpec) SwaggerDoc() map[string]string { @@ -113,14 +113,14 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { var map_DeploymentStatus = map[string]string{ "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "observedGeneration": "observedGeneration is the generation observed by the deployment controller.", + "replicas": "replicas is the total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment that have the desired template spec.", "readyReplicas": "readyReplicas is the number of pods targeted by this Deployment controller with a Ready Condition.", "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Conditions represent the latest available observations of a deployment's current state.", + "collisionCount": "collisionCount is the count of hash collisions for the Deployment. 
The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", } func (DeploymentStatus) SwaggerDoc() map[string]string { @@ -157,8 +157,9 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { } var map_RollingUpdateStatefulSetStrategy = map[string]string{ - "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", - "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned.", + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", } func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { @@ -168,8 +169,8 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status defines current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -178,7 +179,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the number of observed instances of the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -187,9 +188,9 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. 
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "replicas": "replias is the actual number of observed instances of the scaled object.", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "targetSelector": "targetSelector is the label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { @@ -227,10 +228,19 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetOrdinals = map[string]string{ + "": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "start": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. Replica indices will be in the range:\n [0, .spec.replicas).", +} + +func (StatefulSetOrdinals) SwaggerDoc() map[string]string { + return map_StatefulSetOrdinals +} + var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", - "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", - "whenScaled": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", + "whenDeleted": "whenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "whenScaled": "whenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. 
The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", } func (StatefulSetPersistentVolumeClaimRetentionPolicy) SwaggerDoc() map[string]string { @@ -241,14 +251,15 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -265,8 +276,8 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", + "conditions": "conditions represent the latest available observations of a statefulset's current state.", + "availableReplicas": "availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index be3fcc75b..dd73f1a5a 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -342,6 +342,11 @@ func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateState *out = new(int32) **out = **in } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } return } @@ -500,6 +505,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals. +func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals { + if in == nil { + return nil + } + out := new(StatefulSetOrdinals) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { *out = *in @@ -548,6 +569,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) **out = **in } + if in.Ordinals != nil { + in, out := &in.Ordinals, &out.Ordinals + *out = new(StatefulSetOrdinals) + **out = **in + } return } diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index cd1a06e25..1c3d3be5b 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto +// source: k8s.io/api/apps/v1beta2/generated.proto package v1beta2 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } func (*ControllerRevision) ProtoMessage() {} func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{0} + return fileDescriptor_c423c016abf485d4, []int{0} } func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } func (*ControllerRevisionList) ProtoMessage() {} func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{1} + return fileDescriptor_c423c016abf485d4, []int{1} } func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{2} + return fileDescriptor_c423c016abf485d4, []int{2} } func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{3} + return fileDescriptor_c423c016abf485d4, []int{3} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{4} + return fileDescriptor_c423c016abf485d4, []int{4} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{5} + return fileDescriptor_c423c016abf485d4, []int{5} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var 
xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{6} + return fileDescriptor_c423c016abf485d4, []int{6} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{7} + return fileDescriptor_c423c016abf485d4, []int{7} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{8} + return fileDescriptor_c423c016abf485d4, []int{8} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{9} + return fileDescriptor_c423c016abf485d4, []int{9} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +332,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{10} + return fileDescriptor_c423c016abf485d4, []int{10} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +360,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{11} + return fileDescriptor_c423c016abf485d4, []int{11} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +388,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{12} + return fileDescriptor_c423c016abf485d4, []int{12} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +416,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{13} + return fileDescriptor_c423c016abf485d4, []int{13} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +444,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *ReplicaSet) Reset() { *m = 
ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{14} + return fileDescriptor_c423c016abf485d4, []int{14} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +472,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{15} + return fileDescriptor_c423c016abf485d4, []int{15} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +500,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{16} + return fileDescriptor_c423c016abf485d4, []int{16} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +528,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{17} + return fileDescriptor_c423c016abf485d4, []int{17} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +556,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{18} + return fileDescriptor_c423c016abf485d4, []int{18} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +584,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{19} + return fileDescriptor_c423c016abf485d4, []int{19} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +612,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{20} + return fileDescriptor_c423c016abf485d4, []int{20} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +640,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{21} + return fileDescriptor_c423c016abf485d4, []int{21} } func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +668,7 @@ var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo 
func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{22} + return fileDescriptor_c423c016abf485d4, []int{22} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +696,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{23} + return fileDescriptor_c423c016abf485d4, []int{23} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +724,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{24} + return fileDescriptor_c423c016abf485d4, []int{24} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +752,7 @@ var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{25} + return fileDescriptor_c423c016abf485d4, []int{25} } func (m *StatefulSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +780,7 @@ var xxx_messageInfo_StatefulSet proto.InternalMessageInfo func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } func (*StatefulSetCondition) ProtoMessage() {} func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{26} + return fileDescriptor_c423c016abf485d4, []int{26} } func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +808,7 @@ var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{27} + return fileDescriptor_c423c016abf485d4, []int{27} } func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -833,12 +833,40 @@ func (m *StatefulSetList) XXX_DiscardUnknown() { var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} } +func (*StatefulSetOrdinals) ProtoMessage() {} +func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) { + return fileDescriptor_c423c016abf485d4, []int{28} +} +func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetOrdinals.Merge(m, src) +} +func (m *StatefulSetOrdinals) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetOrdinals) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() { *m = StatefulSetPersistentVolumeClaimRetentionPolicy{} } func 
(*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {} func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{28} + return fileDescriptor_c423c016abf485d4, []int{29} } func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -866,7 +894,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{29} + return fileDescriptor_c423c016abf485d4, []int{30} } func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -894,7 +922,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{30} + return fileDescriptor_c423c016abf485d4, []int{31} } func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -922,7 +950,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{31} + return fileDescriptor_c423c016abf485d4, []int{32} } func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -977,6 +1005,7 @@ func init() { proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") + proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1beta2.StatefulSetOrdinals") proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") @@ -984,154 +1013,157 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) -} - -var fileDescriptor_42fe616264472f7e = []byte{ - // 2284 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcf, 0x6f, 0x1b, 0xc7, - 0xf5, 0xf7, 0xf2, 0x87, 0x44, 0x8e, 0x2c, 0xc9, 0x1e, 0xe9, 0x2b, 0x31, 0xf2, 0xb7, 0xa4, 0xb1, - 0x31, 0x1c, 0x25, 0xb6, 0x48, 0x5b, 0xf9, 0x81, 0xc4, 0x2e, 0x92, 0x8a, 0x52, 0x6a, 0x3b, 0xd0, - 0x0f, 0x66, 0x64, 0x39, 0x68, 0xd0, 0x1f, 0x1e, 0x91, 0x63, 0x6a, 0xa3, 0xe5, 0xee, 0x62, 0x77, - 0x96, 0x31, 0xd1, 0x4b, 0xaf, 0x05, 0x0a, 0xb4, 0xbd, 0xf6, 0x9f, 0xe8, 0xad, 0x28, 0x9a, 0x5b, - 0x11, 0x04, 0x3e, 0x06, 0xbd, 0x24, 0xbd, 0x10, 0x35, 0x73, 0x2a, 0x8a, 0xde, 0xda, 0x8b, 0x81, - 0x02, 0xc5, 0xcc, 0xce, 0xfe, 0xde, 0x35, 0x97, 0x8a, 0xa3, 0x34, 0x41, 0x6e, 0xe2, 0xbc, 0xf7, - 0x3e, 0xf3, 0xde, 0xcc, 0x7b, 0xf3, 0x3e, 0x33, 0x2b, 0xf0, 0x83, 0xe3, 0xd7, 0xad, 0xba, 0xa2, - 0x37, 0x8e, 0xed, 0x43, 0x62, 
- … remainder of the removed 2284-byte gzipped FileDescriptorProto byte literal …
0x11, 0xd0, 0x6a, 0x11, 0xd3, 0x52, 0x2c, 0x4a, 0x34, 0x7a, 0x4f, 0x57, 0xed, 0x1e, 0xd9, - 0x54, 0xb1, 0xd2, 0x43, 0x84, 0x0d, 0x28, 0xba, 0xd6, 0xd2, 0x55, 0xa5, 0x3d, 0x80, 0x18, 0xcc, - 0x7c, 0x78, 0x44, 0xb4, 0x2d, 0xa2, 0x12, 0x2a, 0x3e, 0x6b, 0x95, 0x9b, 0x6f, 0xb9, 0x5f, 0x79, - 0xde, 0xf3, 0x45, 0x4f, 0x86, 0xb5, 0xd5, 0x2c, 0x88, 0x3c, 0xcb, 0x82, 0x98, 0xf0, 0xa7, 0x00, - 0xb0, 0x9f, 0xfc, 0x3c, 0xea, 0x88, 0x84, 0x7b, 0xd3, 0xad, 0xca, 0xf7, 0x3c, 0xc9, 0x44, 0x13, - 0x04, 0x10, 0xe5, 0xbf, 0x4e, 0x87, 0xf6, 0xec, 0x5b, 0xff, 0x74, 0xf8, 0x73, 0xb0, 0xd8, 0xf7, - 0x57, 0xc7, 0x55, 0x60, 0xa4, 0x36, 0x1f, 0xbd, 0x1e, 0x7b, 0xf0, 0x49, 0xeb, 0xea, 0x53, 0xe9, - 0x7b, 0x09, 0x70, 0x28, 0x71, 0x12, 0xf8, 0x2a, 0x98, 0x61, 0x64, 0x54, 0x69, 0x93, 0x5d, 0xdc, - 0x73, 0xeb, 0xc9, 0xfb, 0x2a, 0xb8, 0xef, 0x8b, 0x50, 0x50, 0x0f, 0x1e, 0x81, 0x05, 0x43, 0xef, - 0xec, 0x60, 0x0d, 0x77, 0x09, 0x63, 0x57, 0xce, 0x56, 0xf2, 0xf7, 0xc4, 0x72, 0xf3, 0x35, 0xf7, - 0xad, 0xa8, 0x15, 0x57, 0x61, 0xf7, 0xee, 0x84, 0x61, 0x9e, 0x04, 0x49, 0x90, 0xd0, 0x8c, 0x7d, - 0xc9, 0x76, 0x5e, 0xf2, 0xd7, 0xb3, 0x14, 0xd6, 0x09, 0xbf, 0x65, 0xa7, 0x3d, 0x97, 0x96, 0x4e, - 0xf4, 0x5c, 0x9a, 0x70, 0x6f, 0x2c, 0x4f, 0x78, 0x6f, 0xfc, 0x44, 0x02, 0x97, 0x8c, 0x0c, 0xb5, - 0x54, 0x01, 0x7c, 0x6d, 0x6e, 0x67, 0x59, 0x9b, 0x2c, 0xb5, 0xd9, 0x5c, 0x1d, 0x0d, 0x6b, 0x97, - 0xb2, 0x68, 0xa2, 0x4c, 0xfe, 0xc9, 0x1f, 0x15, 0xc1, 0xf9, 0x58, 0xb7, 0xfc, 0x1a, 0x1f, 0x6f, - 0x63, 0x37, 0xc9, 0xfc, 0x04, 0x37, 0xc9, 0x0d, 0x30, 0x2f, 0xfe, 0x1f, 0x20, 0x72, 0x11, 0xf5, - 0x36, 0x76, 0x33, 0x2c, 0x46, 0x51, 0xfd, 0xa4, 0xc7, 0xe3, 0xe2, 0x84, 0x8f, 0xc7, 0x41, 0x2f, - 0xc4, 0xff, 0xb7, 0x39, 0x65, 0x18, 0xf7, 0x42, 0xfc, 0x9b, 0x5b, 0x54, 0x9f, 0x91, 0x44, 0x07, - 0xd5, 0x43, 0x98, 0x0e, 0x93, 0xc4, 0x83, 0x90, 0x14, 0x45, 0xb4, 0xbf, 0xd4, 0x37, 0x6f, 0x9c, - 0xf0, 0xcd, 0x7b, 0x2d, 0x4b, 0xfe, 0x66, 0x7f, 0x27, 0x4e, 0xbc, 0xf1, 0xcf, 0x4c, 0x7e, 0xe3, - 0x97, 0xff, 0x22, 0x81, 0xe7, 0x52, 0x4f, 0x17, 0xb8, 0x11, 0xa2, 0x70, 0x6b, 0x11, 0x0a, 0xf7, - 0xbd, 0x54, 0xc3, 0x00, 0x8f, 0x33, 0x93, 0x9f, 0x90, 0xdf, 0xc8, 0xf6, 0x84, 0x9c, 0x70, 0x0f, - 0x1c, 0xff, 0x96, 0xdc, 0x5c, 0x7b, 0xf4, 0xb8, 0x7a, 0xe6, 0xd3, 0xc7, 0xd5, 0x33, 0x9f, 0x3f, - 0xae, 0x9e, 0xf9, 0xc5, 0xa8, 0x2a, 0x3d, 0x1a, 0x55, 0xa5, 0x4f, 0x47, 0x55, 0xe9, 0xf3, 0x51, - 0x55, 0xfa, 0xdb, 0xa8, 0x2a, 0xfd, 0xe6, 0x8b, 0xea, 0x99, 0xf7, 0xa7, 0xc5, 0x8c, 0xff, 0x0d, - 0x00, 0x00, 0xff, 0xff, 0x3e, 0x13, 0x3b, 0xc7, 0x94, 0x2b, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_c423c016abf485d4) +} + +var fileDescriptor_c423c016abf485d4 = []byte{ + // 2328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xf7, 0xf2, 0x43, 0x26, 0x87, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x5b, 0xd2, 0x58, 0x1b, + 0xb6, 0x12, 0xdb, 0xa4, 0xad, 0x7c, 0x20, 0xb1, 0xdb, 0x04, 0xa2, 0x94, 0xda, 0x0e, 0xf4, 0xc1, + 0x0c, 0x2d, 0x07, 0x0d, 0xfa, 0xe1, 0x11, 0x39, 0xa6, 0x36, 0xde, 0x2f, 0xec, 0x0e, 0x15, 0x13, + 0xbd, 0xf4, 0x5a, 0xa0, 0x40, 0xdb, 0x6b, 0xff, 0x89, 0xa2, 0x97, 0xa2, 0x68, 0xd0, 0x4b, 0x11, + 0x04, 0x3e, 0x06, 0xbd, 0x24, 0x27, 0xa2, 0x66, 0x4e, 0x45, 0xd1, 0x5b, 0x7b, 0x31, 0x50, 0xa0, + 0x98, 0xd9, 0xd9, 0xef, 0x5d, 0x73, 0xa9, 0xd8, 0x4a, 0x13, 0xe4, 0xc6, 0x9d, 0xf7, 0xde, 0x6f, + 0xde, 0xcc, 0xbc, 0x37, 0xef, 0x37, 0x33, 0x04, 0x17, 0x1f, 0xbc, 0x6e, 0x37, 0x14, 0xa3, 0x89, + 0x4d, 0xa5, 0x89, 0x4d, 0xd3, 0x6e, 0x1e, 0x5c, 0xdb, 
0x23, 0x14, 0xaf, 0x36, 0xfb, 0x44, 0x27, + 0x16, 0xa6, 0xa4, 0xd7, 0x30, 0x2d, 0x83, 0x1a, 0x70, 0xd9, 0x51, 0x6c, 0x60, 0x53, 0x69, 0x30, + 0xc5, 0x86, 0x50, 0x3c, 0x7d, 0xa5, 0xaf, 0xd0, 0xfd, 0xc1, 0x5e, 0xa3, 0x6b, 0x68, 0xcd, 0xbe, + 0xd1, 0x37, 0x9a, 0x5c, 0x7f, 0x6f, 0x70, 0x9f, 0x7f, 0xf1, 0x0f, 0xfe, 0xcb, 0xc1, 0x39, 0x2d, + 0x07, 0x3a, 0xec, 0x1a, 0x16, 0x69, 0x1e, 0x5c, 0x8b, 0xf6, 0x75, 0xfa, 0x15, 0x5f, 0x47, 0xc3, + 0xdd, 0x7d, 0x45, 0x27, 0xd6, 0xb0, 0x69, 0x3e, 0xe8, 0xb3, 0x06, 0xbb, 0xa9, 0x11, 0x8a, 0x93, + 0xac, 0x9a, 0x69, 0x56, 0xd6, 0x40, 0xa7, 0x8a, 0x46, 0x62, 0x06, 0xaf, 0x4d, 0x32, 0xb0, 0xbb, + 0xfb, 0x44, 0xc3, 0x31, 0xbb, 0x97, 0xd3, 0xec, 0x06, 0x54, 0x51, 0x9b, 0x8a, 0x4e, 0x6d, 0x6a, + 0x45, 0x8d, 0xe4, 0xff, 0x48, 0x00, 0xae, 0x1b, 0x3a, 0xb5, 0x0c, 0x55, 0x25, 0x16, 0x22, 0x07, + 0x8a, 0xad, 0x18, 0x3a, 0xbc, 0x07, 0x4a, 0x6c, 0x3c, 0x3d, 0x4c, 0x71, 0x55, 0x3a, 0x2b, 0xad, + 0x54, 0x56, 0xaf, 0x36, 0xfc, 0x99, 0xf6, 0xe0, 0x1b, 0xe6, 0x83, 0x3e, 0x6b, 0xb0, 0x1b, 0x4c, + 0xbb, 0x71, 0x70, 0xad, 0xb1, 0xb3, 0xf7, 0x01, 0xe9, 0xd2, 0x2d, 0x42, 0x71, 0x0b, 0x3e, 0x1a, + 0xd5, 0x8f, 0x8d, 0x47, 0x75, 0xe0, 0xb7, 0x21, 0x0f, 0x15, 0xee, 0x80, 0x02, 0x47, 0xcf, 0x71, + 0xf4, 0x2b, 0xa9, 0xe8, 0x62, 0xd0, 0x0d, 0x84, 0x3f, 0x7c, 0xfb, 0x21, 0x25, 0x3a, 0x73, 0xaf, + 0x75, 0x42, 0x40, 0x17, 0x36, 0x30, 0xc5, 0x88, 0x03, 0xc1, 0xcb, 0xa0, 0x64, 0x09, 0xf7, 0xab, + 0xf9, 0xb3, 0xd2, 0x4a, 0xbe, 0x75, 0x52, 0x68, 0x95, 0xdc, 0x61, 0x21, 0x4f, 0x43, 0x7e, 0x24, + 0x81, 0xa5, 0xf8, 0xb8, 0x37, 0x15, 0x9b, 0xc2, 0x1f, 0xc7, 0xc6, 0xde, 0xc8, 0x36, 0x76, 0x66, + 0xcd, 0x47, 0xee, 0x75, 0xec, 0xb6, 0x04, 0xc6, 0xdd, 0x06, 0x45, 0x85, 0x12, 0xcd, 0xae, 0xe6, + 0xce, 0xe6, 0x57, 0x2a, 0xab, 0x97, 0x1a, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x15, 0xb8, + 0xc5, 0xdb, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xbc, 0x81, 0x89, 0x66, 0xe8, 0x1d, + 0x42, 0x8f, 0x60, 0xe5, 0x6e, 0x81, 0x82, 0x6d, 0x92, 0xae, 0x58, 0xb9, 0x0b, 0xa9, 0x03, 0xf0, + 0x7c, 0xea, 0x98, 0xa4, 0xeb, 0x2f, 0x19, 0xfb, 0x42, 0x1c, 0x01, 0xb6, 0xc1, 0x8c, 0x4d, 0x31, + 0x1d, 0xd8, 0x7c, 0xc1, 0x2a, 0xab, 0x2b, 0x19, 0xb0, 0xb8, 0x7e, 0x6b, 0x4e, 0xa0, 0xcd, 0x38, + 0xdf, 0x48, 0xe0, 0xc8, 0xff, 0xc8, 0x01, 0xe8, 0xe9, 0xae, 0x1b, 0x7a, 0x4f, 0xa1, 0x2c, 0x9c, + 0xaf, 0x83, 0x02, 0x1d, 0x9a, 0x84, 0x4f, 0x48, 0xb9, 0x75, 0xc1, 0x75, 0xe5, 0xce, 0xd0, 0x24, + 0x4f, 0x46, 0xf5, 0xa5, 0xb8, 0x05, 0x93, 0x20, 0x6e, 0x03, 0x37, 0x3d, 0x27, 0x73, 0xdc, 0xfa, + 0x95, 0x70, 0xd7, 0x4f, 0x46, 0xf5, 0x84, 0xbd, 0xa3, 0xe1, 0x21, 0x85, 0x1d, 0x84, 0x07, 0x00, + 0xaa, 0xd8, 0xa6, 0x77, 0x2c, 0xac, 0xdb, 0x4e, 0x4f, 0x8a, 0x46, 0xc4, 0xf0, 0x5f, 0xca, 0xb6, + 0x50, 0xcc, 0xa2, 0x75, 0x5a, 0x78, 0x01, 0x37, 0x63, 0x68, 0x28, 0xa1, 0x07, 0x78, 0x01, 0xcc, + 0x58, 0x04, 0xdb, 0x86, 0x5e, 0x2d, 0xf0, 0x51, 0x78, 0x13, 0x88, 0x78, 0x2b, 0x12, 0x52, 0xf8, + 0x22, 0x38, 0xae, 0x11, 0xdb, 0xc6, 0x7d, 0x52, 0x2d, 0x72, 0xc5, 0x79, 0xa1, 0x78, 0x7c, 0xcb, + 0x69, 0x46, 0xae, 0x5c, 0xfe, 0xa3, 0x04, 0x66, 0xbd, 0x99, 0x3b, 0x82, 0xcc, 0xb9, 0x19, 0xce, + 0x1c, 0x79, 0x72, 0xb0, 0xa4, 0x24, 0xcc, 0xc7, 0xf9, 0x80, 0xe3, 0x2c, 0x1c, 0xe1, 0x4f, 0x40, + 0xc9, 0x26, 0x2a, 0xe9, 0x52, 0xc3, 0x12, 0x8e, 0xbf, 0x9c, 0xd1, 0x71, 0xbc, 0x47, 0xd4, 0x8e, + 0x30, 0x6d, 0x9d, 0x60, 0x9e, 0xbb, 0x5f, 0xc8, 0x83, 0x84, 0xef, 0x82, 0x12, 0x25, 0x9a, 0xa9, + 0x62, 0x4a, 0x44, 0xd6, 0x9c, 0x0b, 0x3a, 0xcf, 0x62, 0x86, 0x81, 0xb5, 0x8d, 0xde, 0x1d, 0xa1, + 0xc6, 0x53, 0xc6, 0x9b, 0x0c, 0xb7, 0x15, 0x79, 0x30, 0xd0, 0x04, 0x73, 0x03, 
0xb3, 0xc7, 0x34, + 0x29, 0xdb, 0xce, 0xfb, 0x43, 0x11, 0x43, 0x57, 0x27, 0xcf, 0xca, 0x6e, 0xc8, 0xae, 0xb5, 0x24, + 0x7a, 0x99, 0x0b, 0xb7, 0xa3, 0x08, 0x3e, 0x5c, 0x03, 0xf3, 0x9a, 0xa2, 0x23, 0x82, 0x7b, 0xc3, + 0x0e, 0xe9, 0x1a, 0x7a, 0xcf, 0xe6, 0xa1, 0x54, 0x6c, 0x2d, 0x0b, 0x80, 0xf9, 0xad, 0xb0, 0x18, + 0x45, 0xf5, 0xe1, 0x26, 0x58, 0x74, 0x37, 0xe0, 0x5b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa9, 0x68, + 0x0a, 0xad, 0xce, 0x70, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x44, 0x09, 0x72, 0x94, 0x68, 0x25, 0xff, + 0x76, 0x06, 0xcc, 0x47, 0xf6, 0x05, 0x78, 0x17, 0x2c, 0x75, 0x07, 0x96, 0x45, 0x74, 0xba, 0x3d, + 0xd0, 0xf6, 0x88, 0xd5, 0xe9, 0xee, 0x93, 0xde, 0x40, 0x25, 0x3d, 0xbe, 0xac, 0xc5, 0x56, 0x4d, + 0xf8, 0xba, 0xb4, 0x9e, 0xa8, 0x85, 0x52, 0xac, 0xe1, 0x3b, 0x00, 0xea, 0xbc, 0x69, 0x4b, 0xb1, + 0x6d, 0x0f, 0x33, 0xc7, 0x31, 0xbd, 0x54, 0xdc, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xe6, 0x63, 0x8f, + 0xd8, 0x8a, 0x45, 0x7a, 0x51, 0x1f, 0xf3, 0x61, 0x1f, 0x37, 0x12, 0xb5, 0x50, 0x8a, 0x35, 0x7c, + 0x15, 0x54, 0x9c, 0xde, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x10, 0x60, 0x95, 0x6d, 0x5f, 0x84, 0x82, + 0x7a, 0x6c, 0x68, 0xc6, 0x9e, 0x4d, 0xac, 0x03, 0xd2, 0xbb, 0xe9, 0x90, 0x03, 0x56, 0x41, 0x8b, + 0xbc, 0x82, 0x7a, 0x43, 0xdb, 0x89, 0x69, 0xa0, 0x04, 0x2b, 0x36, 0x34, 0x27, 0x6a, 0x62, 0x43, + 0x9b, 0x09, 0x0f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3b, 0xc0, + 0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x3d, 0x1e, 0x8e, 0xbd, 0xed, 0xb0, 0x18, 0x45, 0xf5, 0xe1, 0x4d, + 0x70, 0xca, 0x69, 0xda, 0xd5, 0xb1, 0x07, 0x52, 0xe2, 0x20, 0x2f, 0x08, 0x90, 0x53, 0xdb, 0x51, + 0x05, 0x14, 0xb7, 0x81, 0xd7, 0xc1, 0x5c, 0xd7, 0x50, 0x55, 0x1e, 0x8f, 0xeb, 0xc6, 0x40, 0xa7, + 0xd5, 0x32, 0x47, 0x81, 0x2c, 0x87, 0xd6, 0x43, 0x12, 0x14, 0xd1, 0x84, 0x3f, 0x03, 0xa0, 0xeb, + 0x16, 0x06, 0xbb, 0x0a, 0x26, 0x30, 0x80, 0x78, 0x59, 0xf2, 0x2b, 0xb3, 0xd7, 0x64, 0xa3, 0x00, + 0xa4, 0xfc, 0xb1, 0x04, 0x96, 0x53, 0x12, 0x1d, 0xbe, 0x15, 0x2a, 0x82, 0x97, 0x22, 0x45, 0xf0, + 0x4c, 0x8a, 0x59, 0xa0, 0x12, 0xee, 0x83, 0x59, 0x46, 0x48, 0x14, 0xbd, 0xef, 0xa8, 0x88, 0xbd, + 0xac, 0x99, 0x3a, 0x00, 0x14, 0xd4, 0xf6, 0x77, 0xe5, 0x53, 0xe3, 0x51, 0x7d, 0x36, 0x24, 0x43, + 0x61, 0x60, 0xf9, 0x57, 0x39, 0x00, 0x36, 0x88, 0xa9, 0x1a, 0x43, 0x8d, 0xe8, 0x47, 0xc1, 0x69, + 0x6e, 0x87, 0x38, 0xcd, 0xc5, 0xf4, 0x25, 0xf1, 0x9c, 0x4a, 0x25, 0x35, 0xef, 0x46, 0x48, 0xcd, + 0x8b, 0x59, 0xc0, 0x9e, 0xce, 0x6a, 0x3e, 0xcb, 0x83, 0x05, 0x5f, 0xd9, 0xa7, 0x35, 0x37, 0x42, + 0x2b, 0x7a, 0x31, 0xb2, 0xa2, 0xcb, 0x09, 0x26, 0xcf, 0x8d, 0xd7, 0x7c, 0x00, 0xe6, 0x18, 0xeb, + 0x70, 0xd6, 0x8f, 0x73, 0x9a, 0x99, 0xa9, 0x39, 0x8d, 0x57, 0x89, 0x36, 0x43, 0x48, 0x28, 0x82, + 0x9c, 0xc2, 0xa1, 0x8e, 0x7f, 0x1d, 0x39, 0xd4, 0x9f, 0x24, 0x30, 0xe7, 0x2f, 0xd3, 0x11, 0x90, + 0xa8, 0x5b, 0x61, 0x12, 0x75, 0x2e, 0x43, 0x70, 0xa6, 0xb0, 0xa8, 0xcf, 0x0a, 0x41, 0xd7, 0x39, + 0x8d, 0x5a, 0x61, 0x47, 0x30, 0x53, 0x55, 0xba, 0xd8, 0x16, 0xf5, 0xf6, 0x84, 0x73, 0xfc, 0x72, + 0xda, 0x90, 0x27, 0x0d, 0x11, 0xae, 0xdc, 0xf3, 0x25, 0x5c, 0xf9, 0x67, 0x43, 0xb8, 0x7e, 0x04, + 0x4a, 0xb6, 0x4b, 0xb5, 0x0a, 0x1c, 0xf2, 0x52, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8, + 0x95, 0x07, 0x97, 0xc4, 0xac, 0x8a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a, + 0x3c, 0xa9, 0x4a, 0x7e, 0xa0, 0xb7, 0x79, 0x2b, 0x12, 0x52, 0xb8, 0x0b, 0x96, 0x4d, 0xcb, 0xe8, + 0x5b, 0xc4, 0xb6, 0x37, 0x08, 0xee, 0xa9, 0x8a, 0x4e, 0xdc, 0x01, 0x38, 0x35, 0xf1, 0xcc, 0x78, + 0x54, 0x5f, 0x6e, 0x27, 0xab, 0xa0, 0x34, 0x5b, 0xf9, 0xaf, 0x05, 0x70, 0x32, 0xba, 0x37, 0xa6, + 0xd0, 
0x14, 0xe9, 0x50, 0x34, 0xe5, 0x72, 0x20, 0x4e, 0x1d, 0x0e, 0x17, 0xb8, 0x2a, 0x88, 0xc5, + 0xea, 0x1a, 0x98, 0x17, 0xb4, 0xc4, 0x15, 0x0a, 0xa2, 0xe6, 0x2d, 0xcf, 0x6e, 0x58, 0x8c, 0xa2, + 0xfa, 0xf0, 0x06, 0x98, 0xb5, 0x38, 0xf3, 0x72, 0x01, 0x1c, 0xf6, 0xf2, 0x1d, 0x01, 0x30, 0x8b, + 0x82, 0x42, 0x14, 0xd6, 0x65, 0xcc, 0xc5, 0x27, 0x24, 0x2e, 0x40, 0x21, 0xcc, 0x5c, 0xd6, 0xa2, + 0x0a, 0x28, 0x6e, 0x03, 0xb7, 0xc0, 0xc2, 0x40, 0x8f, 0x43, 0x39, 0xb1, 0x76, 0x46, 0x40, 0x2d, + 0xec, 0xc6, 0x55, 0x50, 0x92, 0x1d, 0xbc, 0x17, 0x22, 0x33, 0x33, 0x7c, 0x3f, 0xb9, 0x9c, 0x21, + 0x27, 0x32, 0xb3, 0x99, 0x04, 0xaa, 0x55, 0xca, 0x4a, 0xb5, 0xe4, 0x8f, 0x24, 0x00, 0xe3, 0x79, + 0x38, 0xf1, 0x26, 0x20, 0x66, 0x11, 0xa8, 0x98, 0x4a, 0x32, 0xff, 0xb9, 0x9a, 0x91, 0xff, 0xf8, + 0x1b, 0x6a, 0x36, 0x02, 0x24, 0x26, 0xfa, 0x68, 0x2e, 0x75, 0xb2, 0x12, 0x20, 0xdf, 0xa9, 0x67, + 0x40, 0x80, 0x02, 0x60, 0x4f, 0x27, 0x40, 0xff, 0xcc, 0x81, 0x05, 0x5f, 0x39, 0x33, 0x01, 0x4a, + 0x30, 0xf9, 0xf6, 0x62, 0x27, 0x1b, 0x29, 0xf1, 0xa7, 0xee, 0xff, 0x89, 0x94, 0xf8, 0x5e, 0xa5, + 0x90, 0x92, 0xdf, 0xe7, 0x82, 0xae, 0x4f, 0x49, 0x4a, 0x9e, 0xc1, 0x0d, 0xc7, 0xd7, 0x8e, 0xd7, + 0xc8, 0x9f, 0xe4, 0xc1, 0xc9, 0x68, 0x1e, 0x86, 0x0a, 0xa4, 0x34, 0xb1, 0x40, 0xb6, 0xc1, 0xe2, + 0xfd, 0x81, 0xaa, 0x0e, 0xf9, 0x18, 0x02, 0x55, 0xd2, 0x29, 0xad, 0xdf, 0x15, 0x96, 0x8b, 0x3f, + 0x4c, 0xd0, 0x41, 0x89, 0x96, 0xf1, 0x7a, 0x59, 0xf8, 0xb2, 0xf5, 0xb2, 0x78, 0x88, 0x7a, 0x99, + 0x4c, 0x39, 0xf2, 0x87, 0xa2, 0x1c, 0xd3, 0x15, 0xcb, 0x84, 0x8d, 0x6b, 0xe2, 0xd1, 0x7f, 0x2c, + 0x81, 0xa5, 0xe4, 0x03, 0x37, 0x54, 0xc1, 0x9c, 0x86, 0x1f, 0x06, 0x2f, 0x3e, 0x26, 0x15, 0x91, + 0x01, 0x55, 0xd4, 0x86, 0xf3, 0x64, 0xd4, 0xb8, 0xad, 0xd3, 0x1d, 0xab, 0x43, 0x2d, 0x45, 0xef, + 0x3b, 0x95, 0x77, 0x2b, 0x84, 0x85, 0x22, 0xd8, 0xf0, 0x7d, 0x50, 0xd2, 0xf0, 0xc3, 0xce, 0xc0, + 0xea, 0x27, 0x55, 0xc8, 0x6c, 0xfd, 0xf0, 0x04, 0xd8, 0x12, 0x28, 0xc8, 0xc3, 0x93, 0xbf, 0x90, + 0xc0, 0x72, 0x4a, 0x55, 0xfd, 0x06, 0x8d, 0xf2, 0x2f, 0x12, 0x38, 0x1b, 0x1a, 0x25, 0x4b, 0x4b, + 0x72, 0x7f, 0xa0, 0xf2, 0x0c, 0x15, 0x4c, 0xe6, 0x12, 0x28, 0x9b, 0xd8, 0xa2, 0x8a, 0xc7, 0x83, + 0x8b, 0xad, 0xd9, 0xf1, 0xa8, 0x5e, 0x6e, 0xbb, 0x8d, 0xc8, 0x97, 0x27, 0xcc, 0x4d, 0xee, 0xf9, + 0xcd, 0x8d, 0xfc, 0x5f, 0x09, 0x14, 0x3b, 0x5d, 0xac, 0x92, 0x23, 0x20, 0x2e, 0x1b, 0x21, 0xe2, + 0x92, 0xfe, 0x28, 0xc0, 0xfd, 0x49, 0xe5, 0x2c, 0x9b, 0x11, 0xce, 0x72, 0x7e, 0x02, 0xce, 0xd3, + 0xe9, 0xca, 0x1b, 0xa0, 0xec, 0x75, 0x37, 0xdd, 0x5e, 0x2a, 0xff, 0x2e, 0x07, 0x2a, 0x81, 0x2e, + 0xa6, 0xdc, 0x89, 0xef, 0x85, 0xca, 0x0f, 0xdb, 0x63, 0x56, 0xb3, 0x0c, 0xa4, 0xe1, 0x96, 0x9a, + 0xb7, 0x75, 0x6a, 0x05, 0xcf, 0xaa, 0xf1, 0x0a, 0xf4, 0x26, 0x98, 0xa3, 0xd8, 0xea, 0x13, 0xea, + 0xca, 0xf8, 0x84, 0x95, 0xfd, 0xbb, 0x9b, 0x3b, 0x21, 0x29, 0x8a, 0x68, 0x9f, 0xbe, 0x01, 0x66, + 0x43, 0x9d, 0xc1, 0x93, 0x20, 0xff, 0x80, 0x0c, 0x1d, 0x06, 0x87, 0xd8, 0x4f, 0xb8, 0x08, 0x8a, + 0x07, 0x58, 0x1d, 0x38, 0x21, 0x5a, 0x46, 0xce, 0xc7, 0xf5, 0xdc, 0xeb, 0x92, 0xfc, 0x6b, 0x36, + 0x39, 0x7e, 0x2a, 0x1c, 0x41, 0x74, 0xbd, 0x13, 0x8a, 0xae, 0xf4, 0xf7, 0xc9, 0x60, 0x82, 0xa6, + 0xc5, 0x18, 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xc5, 0x80, + 0xb6, 0xcf, 0x8c, 0xbf, 0x1f, 0x62, 0xc6, 0x2b, 0x11, 0x66, 0x5c, 0x4d, 0xb2, 0xf9, 0x96, 0x1a, + 0x4f, 0xa6, 0xc6, 0x7f, 0x96, 0xc0, 0x7c, 0x60, 0xee, 0x8e, 0x80, 0x1b, 0xdf, 0x0e, 0x73, 0xe3, + 0xf3, 0x59, 0x82, 0x26, 0x85, 0x1c, 0x5f, 0x07, 0x0b, 0x01, 0xa5, 0x1d, 0xab, 0xa7, 0xe8, 0x58, + 0xb5, 0xe1, 0x39, 0x50, 0xb4, 
0x29, 0xb6, 0xa8, 0x5b, 0x44, 0x5c, 0xdb, 0x0e, 0x6b, 0x44, 0x8e, + 0x4c, 0xfe, 0xb7, 0x04, 0x9a, 0x01, 0xe3, 0x36, 0xb1, 0x6c, 0xc5, 0xa6, 0x44, 0xa7, 0x77, 0x0d, + 0x75, 0xa0, 0x91, 0x75, 0x15, 0x2b, 0x1a, 0x22, 0xac, 0x41, 0x31, 0xf4, 0xb6, 0xa1, 0x2a, 0xdd, + 0x21, 0xc4, 0xa0, 0xf2, 0xe1, 0x3e, 0xd1, 0x37, 0x88, 0x4a, 0xa8, 0x78, 0x81, 0x2b, 0xb7, 0xde, + 0x72, 0x1f, 0xa4, 0xde, 0xf3, 0x45, 0x4f, 0x46, 0xf5, 0x95, 0x2c, 0x88, 0x3c, 0x42, 0x83, 0x98, + 0xf0, 0xa7, 0x00, 0xb0, 0x4f, 0xbe, 0x97, 0xf5, 0x44, 0xb0, 0xbe, 0xe9, 0x66, 0xf4, 0x7b, 0x9e, + 0x64, 0xaa, 0x0e, 0x02, 0x88, 0xf2, 0x1f, 0x4a, 0xa1, 0xf5, 0xfe, 0xc6, 0xdf, 0x72, 0xfe, 0x1c, + 0x2c, 0x1e, 0xf8, 0xb3, 0xe3, 0x2a, 0x30, 0xfe, 0x9d, 0x8f, 0x9e, 0xe4, 0x3d, 0xf8, 0xa4, 0x79, + 0xf5, 0x59, 0xff, 0xdd, 0x04, 0x38, 0x94, 0xd8, 0x09, 0x7c, 0x15, 0x54, 0x18, 0x6f, 0x56, 0xba, + 0x64, 0x1b, 0x6b, 0x6e, 0x2e, 0x7a, 0x0f, 0x98, 0x1d, 0x5f, 0x84, 0x82, 0x7a, 0x70, 0x1f, 0x2c, + 0x98, 0x46, 0x6f, 0x0b, 0xeb, 0xb8, 0x4f, 0x18, 0x11, 0x74, 0x96, 0x92, 0x5f, 0x7d, 0x96, 0x5b, + 0xaf, 0xb9, 0xd7, 0x5a, 0xed, 0xb8, 0xca, 0x93, 0x51, 0x7d, 0x39, 0xa1, 0x99, 0x07, 0x41, 0x12, + 0x24, 0xb4, 0x62, 0x8f, 0xee, 0xce, 0xa3, 0xc3, 0x6a, 0x96, 0xa4, 0x3c, 0xe4, 0xb3, 0x7b, 0xda, + 0xcd, 0x6e, 0xe9, 0x50, 0x37, 0xbb, 0x09, 0x47, 0xdc, 0xf2, 0x94, 0x47, 0xdc, 0x4f, 0x24, 0x70, + 0xde, 0xcc, 0x90, 0x4b, 0x55, 0xc0, 0xe7, 0xe6, 0x56, 0x96, 0xb9, 0xc9, 0x92, 0x9b, 0xad, 0x95, + 0xf1, 0xa8, 0x7e, 0x3e, 0x8b, 0x26, 0xca, 0xe4, 0x1f, 0xbc, 0x0b, 0x4a, 0x86, 0xd8, 0x03, 0xab, + 0x15, 0xee, 0xeb, 0xe5, 0x2c, 0xbe, 0xba, 0xfb, 0xa6, 0x93, 0x96, 0xee, 0x17, 0xf2, 0xb0, 0xe4, + 0x8f, 0x8a, 0xe0, 0x54, 0xac, 0x82, 0x7f, 0x85, 0xf7, 0xd7, 0xb1, 0xc3, 0x74, 0x7e, 0x8a, 0xc3, + 0xf4, 0x1a, 0x98, 0x17, 0x7f, 0x89, 0x88, 0x9c, 0xc5, 0xbd, 0x80, 0x59, 0x0f, 0x8b, 0x51, 0x54, + 0x3f, 0xe9, 0xfe, 0xbc, 0x38, 0xe5, 0xfd, 0x79, 0xd0, 0x0b, 0xf1, 0x17, 0x3f, 0x27, 0xbd, 0xe3, + 0x5e, 0x88, 0x7f, 0xfa, 0x45, 0xf5, 0x19, 0x71, 0x75, 0x50, 0x3d, 0x84, 0xe3, 0x61, 0xe2, 0xba, + 0x1b, 0x92, 0xa2, 0x88, 0xf6, 0x97, 0x7a, 0xf6, 0xc7, 0x09, 0xcf, 0xfe, 0x57, 0xb2, 0xc4, 0x5a, + 0xf6, 0xab, 0xf2, 0xc4, 0x4b, 0x8f, 0xca, 0xf4, 0x97, 0x1e, 0xf2, 0xdf, 0x24, 0xf0, 0x42, 0xea, + 0xae, 0x05, 0xd7, 0x42, 0xb4, 0xf2, 0x4a, 0x84, 0x56, 0x7e, 0x2f, 0xd5, 0x30, 0xc0, 0x2d, 0xad, + 0xe4, 0x5b, 0xf4, 0x37, 0xb2, 0xdd, 0xa2, 0x27, 0x9c, 0x84, 0x27, 0x5f, 0xa7, 0xb7, 0x7e, 0xf0, + 0xe8, 0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d, + 0x7a, 0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa, + 0xcd, 0x17, 0xb5, 0x63, 0xef, 0x2f, 0xa7, 0xfc, 0xe9, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xa4, 0x79, 0xcd, 0x52, 0x8e, 0x2c, 0x00, 0x00, } func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { @@ -2265,6 +2297,18 @@ func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (in _ = i var l int _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.Partition != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) i-- @@ -2560,6 +2604,32 @@ func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StatefulSetOrdinals) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetOrdinals) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetOrdinals) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Start)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2613,6 +2683,18 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Ordinals != nil { + { + size, err := m.Ordinals.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.PersistentVolumeClaimRetentionPolicy != nil { { size, err := m.PersistentVolumeClaimRetentionPolicy.MarshalToSizedBuffer(dAtA[:i]) @@ -3215,6 +3297,10 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { if m.Partition != nil { n += 1 + sovGenerated(uint64(*m.Partition)) } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3314,6 +3400,16 @@ func (m *StatefulSetList) Size() (n int) { return n } +func (m *StatefulSetOrdinals) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Start)) + return n +} + func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Size() (n int) { if m == nil { return 0 @@ -3362,6 +3458,10 @@ func (m *StatefulSetSpec) Size() (n int) { l = m.PersistentVolumeClaimRetentionPolicy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Ordinals != nil { + l = m.Ordinals.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3729,6 +3829,7 @@ func (this *RollingUpdateStatefulSetStrategy) String() string { } s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -3819,6 +3920,16 @@ func (this *StatefulSetList) String() string { }, "") return s } +func (this *StatefulSetOrdinals) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetOrdinals{`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetPersistentVolumeClaimRetentionPolicy) String() string { if this == nil { return "nil" @@ -3850,6 +3961,7 @@ func (this *StatefulSetSpec) String() string { `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `PersistentVolumeClaimRetentionPolicy:` + strings.Replace(this.PersistentVolumeClaimRetentionPolicy.String(), "StatefulSetPersistentVolumeClaimRetentionPolicy", "StatefulSetPersistentVolumeClaimRetentionPolicy", 1) + `,`, + `Ordinals:` + strings.Replace(this.Ordinals.String(), "StatefulSetOrdinals", "StatefulSetOrdinals", 1) + `,`, `}`, }, "") return s @@ -7395,6 +7507,42 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } } m.Partition = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8339,6 +8487,75 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetOrdinals) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetOrdinals: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetOrdinals: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8777,6 +8994,42 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordinals", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ordinals == nil { + m.Ordinals = &StatefulSetOrdinals{} + } + if err := m.Ordinals.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 57595d690..d3db8956e 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -28,7 +28,7 @@ import 
"k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta2"; +option go_package = "k8s.io/api/apps/v1beta2"; // DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the // release notes for more information. @@ -45,10 +45,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -71,7 +71,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -97,7 +97,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -113,7 +113,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -125,14 +125,15 @@ message DaemonSetSpec { // Must match in order to be controlled. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -203,6 +204,8 @@ message DaemonSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DaemonSetCondition conditions = 10; } @@ -227,7 +230,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -247,10 +250,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -263,7 +266,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -279,10 +282,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + // The only allowed template.spec.restartPolicy value is "Always". + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -344,6 +348,8 @@ message DeploymentStatus { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -376,7 +382,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -402,7 +408,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -418,7 +424,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -444,18 +450,18 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -479,6 +485,8 @@ message ReplicaSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicaSetCondition conditions = 6; } @@ -499,7 +507,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -519,9 +527,8 @@ message RollingUpdateDaemonSet { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -537,7 +544,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. 
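A minimal sketch, assuming client code built against the bumped k8s.io/api v0.31.x module, of how the intstr.IntOrString-typed rolling-update fields documented in the surrounding apps/v1beta2 hunks (maxUnavailable/maxSurge for DaemonSets and Deployments above, and the new StatefulSet maxUnavailable in the hunk that follows) might be populated. The concrete values ("10%" and 1) are illustrative assumptions; nothing below is taken from kube-mgmt itself.

package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// IntOrString accepts either a percentage string or an absolute count.
	maxUnavailable := intstr.FromString("10%")
	maxSurge := intstr.FromInt32(1)

	// Rolling-update settings for a DaemonSet, using the fields described
	// in the generated.proto hunks above. Illustrative values only.
	strategy := appsv1beta2.DaemonSetUpdateStrategy{
		Type: appsv1beta2.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &appsv1beta2.RollingUpdateDaemonSet{
			MaxUnavailable: &maxUnavailable,
			MaxSurge:       &maxSurge,
		},
	}
	fmt.Printf("%+v\n", strategy)
}

Both fields are pointers, so leaving either nil falls back to the defaults stated in the field comments (maxUnavailable defaults to 1, maxSurge to 0).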
@@ -551,23 +558,34 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. message RollingUpdateStatefulSetStrategy { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - // Default value is 0. + // Partition indicates the ordinal at which the StatefulSet should be partitioned + // for updates. During a rolling update, all pods from ordinal Replicas-1 to + // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. + // This is helpful in being able to do a canary based deployment. The default value is 0. // +optional optional int32 partition = 1; + + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding up. This can not be 0. + // Defaults to 1. This field is alpha-level and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to + // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it + // will be counted towards MaxUnavailable. + // +optional + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -590,7 +608,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -616,7 +634,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -638,7 +656,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -652,11 +670,26 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. 
message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +message StatefulSetOrdinals { + // start is the number representing the first replica's index. It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + optional int32 start = 1; +} + // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs // created from the StatefulSet VolumeClaimTemplates. message StatefulSetPersistentVolumeClaimRetentionPolicy { @@ -687,13 +720,16 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -703,7 +739,8 @@ message StatefulSetSpec { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + // +listType=atomic + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -737,7 +774,6 @@ message StatefulSetSpec { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional optional int32 minReadySeconds = 9; @@ -746,6 +782,12 @@ message StatefulSetSpec { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; + + // ordinals controls the numbering of replica indices in a StatefulSet. 
The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. + // +optional + optional StatefulSetOrdinals ordinals = 11; } // StatefulSetStatus represents the current state of a StatefulSet. @@ -787,10 +829,11 @@ message StatefulSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated StatefulSetCondition conditions = 10; // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. // +optional optional int32 availableReplicas = 11; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index c69168e6c..f93a5bea7 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -43,7 +43,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -163,11 +163,21 @@ const ( // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. type RollingUpdateStatefulSetStrategy struct { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - // Default value is 0. + // Partition indicates the ordinal at which the StatefulSet should be partitioned + // for updates. During a rolling update, all pods from ordinal Replicas-1 to + // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. + // This is helpful in being able to do a canary based deployment. The default value is 0. // +optional Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding up. This can not be 0. + // Defaults to 1. This field is alpha-level and is only honored by servers that enable the + // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to + // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it + // will be counted towards MaxUnavailable. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"` } // PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine @@ -204,6 +214,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct { WhenScaled PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty" protobuf:"bytes,2,opt,name=whenScaled,casttype=PersistentVolumeClaimRetentionPolicyType"` } +// StatefulSetOrdinals describes the policy used for replica ordinal assignment +// in this StatefulSet. +type StatefulSetOrdinals struct { + // start is the number representing the first replica's index. 
It may be used + // to number replicas from an alternate index (eg: 1-indexed) over the default + // 0-indexed names, or to orchestrate progressive movement of replicas from + // one StatefulSet to another. + // If set, replica indices will be in the range: + // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas). + // If unset, defaults to 0. Replica indices will be in the range: + // [0, .spec.replicas). + // +optional + Start int32 `json:"start" protobuf:"varint,1,opt,name=start"` +} + // A StatefulSetSpec is the specification of a StatefulSet. type StatefulSetSpec struct { // replicas is the desired number of replicas of the given Template. @@ -222,7 +247,10 @@ type StatefulSetSpec struct { // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. + // of the StatefulSet. Each pod will be named with the format + // -. For example, a pod in a StatefulSet named + // "web" with index number "3" would be named "web-3". + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // volumeClaimTemplates is a list of claims that pods are allowed to reference. @@ -233,6 +261,7 @@ type StatefulSetSpec struct { // any volumes in the template, with the same name. // TODO: Define the behavior if a claim already exists with the same name. // +optional + // +listType=atomic VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` // serviceName is the name of the service that governs this StatefulSet. @@ -267,7 +296,6 @@ type StatefulSetSpec struct { // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) - // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` @@ -276,6 +304,12 @@ type StatefulSetSpec struct { // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` + + // ordinals controls the numbering of replica indices in a StatefulSet. The + // default ordinals behavior assigns a "0" index to the first replica and + // increments the index by one for each additional replica requested. + // +optional + Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } // StatefulSetStatus represents the current state of a StatefulSet. @@ -317,10 +351,11 @@ type StatefulSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. - // This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate. 
// +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"` } @@ -396,6 +431,7 @@ type DeploymentSpec struct { Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. + // The only allowed template.spec.restartPolicy value is "Always". Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` // The deployment strategy to use to replace existing pods with new ones. @@ -520,6 +556,8 @@ type DeploymentStatus struct { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -641,7 +679,6 @@ type RollingUpdateDaemonSet struct { // daemonset on any given node can double if the readiness check fails, and // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. - // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -658,6 +695,7 @@ type DaemonSetSpec struct { // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` @@ -730,6 +768,8 @@ type DaemonSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } @@ -892,7 +932,7 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` @@ -916,6 +956,8 @@ type ReplicaSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 3f3f12202..0b8fe34af 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ @@ -85,7 +85,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -162,7 +162,7 @@ var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", + "template": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", @@ -248,7 +248,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "readyReplicas is the number of pods targeted by this ReplicaSet controller with a Ready Condition.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", @@ -263,7 +263,7 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.", + "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. 
status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", } func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { @@ -281,8 +281,9 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { } var map_RollingUpdateStatefulSetStrategy = map[string]string{ - "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", - "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", } func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { @@ -312,7 +313,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -351,6 +352,15 @@ func (StatefulSetList) SwaggerDoc() map[string]string { return map_StatefulSetList } +var map_StatefulSetOrdinals = map[string]string{ + "": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "start": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. Replica indices will be in the range:\n [0, .spec.replicas).", +} + +func (StatefulSetOrdinals) SwaggerDoc() map[string]string { + return map_StatefulSetOrdinals +} + var map_StatefulSetPersistentVolumeClaimRetentionPolicy = map[string]string{ "": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", "whenDeleted": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", @@ -365,14 +375,15 @@ var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format -. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. 
Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { @@ -390,7 +401,7 @@ var map_StatefulSetStatus = map[string]string{ "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", "conditions": "Represents the latest available observations of a statefulset's current state.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. 
This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 8293b9886..cd92792db 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -597,6 +597,11 @@ func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateState *out = new(int32) **out = **in } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } return } @@ -755,6 +760,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals. +func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals { + if in == nil { + return nil + } + out := new(StatefulSetOrdinals) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) { *out = *in @@ -803,6 +824,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { *out = new(StatefulSetPersistentVolumeClaimRetentionPolicy) **out = **in } + if in.Ordinals != nil { + in, out := &in.Ordinals, &out.Ordinals + *out = new(StatefulSetOrdinals) + **out = **in + } return } diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 1614265bd..3bdc89bad 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 89e7e9204..6d922030c 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto +// source: k8s.io/api/authentication/v1/generated.proto package v1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } func (*BoundObjectReference) ProtoMessage() {} func (*BoundObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{0} + return fileDescriptor_d1237cbf54dccd53, []int{0} } func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{1} + return fileDescriptor_d1237cbf54dccd53, []int{1} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,10 +102,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_d1237cbf54dccd53, []int{2} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_d1237cbf54dccd53, []int{3} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{2} + return fileDescriptor_d1237cbf54dccd53, []int{4} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +189,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } func (*TokenRequestSpec) ProtoMessage() {} func (*TokenRequestSpec) Descriptor() ([]byte, []int) { - return 
fileDescriptor_2953ea822e7ffe1e, []int{3} + return fileDescriptor_d1237cbf54dccd53, []int{5} } func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +217,7 @@ var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } func (*TokenRequestStatus) ProtoMessage() {} func (*TokenRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{4} + return fileDescriptor_d1237cbf54dccd53, []int{6} } func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +245,7 @@ var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{5} + return fileDescriptor_d1237cbf54dccd53, []int{7} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +273,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{6} + return fileDescriptor_d1237cbf54dccd53, []int{8} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +301,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{7} + return fileDescriptor_d1237cbf54dccd53, []int{9} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +329,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{8} + return fileDescriptor_d1237cbf54dccd53, []int{10} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,6 +357,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1.SelfSubjectReviewStatus") proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest") proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec") proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus") @@ -312,68 +370,71 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) -} - -var fileDescriptor_2953ea822e7ffe1e = []byte{ - // 906 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0xc4, - 0x17, 0x8f, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0x7e, 0xdb, 0xd9, 0xef, 0x4a, 0x51, 0x81, 0xa4, 0x78, - 0x25, 0x54, 0x01, 0x6b, 0x6f, 0x22, 0x04, 0xab, 
0x45, 0x42, 0xaa, 0x69, 0x04, 0x11, 0x82, 0x5d, - 0xcd, 0x6e, 0x0b, 0xe2, 0xc4, 0xc4, 0x7e, 0x4d, 0x87, 0xe0, 0xb1, 0xb1, 0xc7, 0x61, 0x73, 0xdb, - 0x3f, 0x81, 0x23, 0x48, 0x1c, 0xf8, 0x23, 0x90, 0xf8, 0x17, 0x7a, 0x5c, 0x71, 0xda, 0x03, 0x8a, - 0xa8, 0xb9, 0x72, 0xe4, 0xc4, 0x09, 0xcd, 0x78, 0x5a, 0xc7, 0x49, 0x9b, 0xe6, 0xc4, 0x2d, 0x7e, - 0xef, 0xf3, 0x3e, 0xef, 0xbd, 0xcf, 0x7c, 0x32, 0x83, 0xfa, 0xe3, 0x07, 0xb1, 0xc5, 0x02, 0x7b, - 0x9c, 0x0c, 0x21, 0xe2, 0x20, 0x20, 0xb6, 0x27, 0xc0, 0xbd, 0x20, 0xb2, 0x75, 0x82, 0x86, 0xcc, - 0xa6, 0x89, 0x38, 0x05, 0x2e, 0x98, 0x4b, 0x05, 0x0b, 0xb8, 0x3d, 0xe9, 0xda, 0x23, 0xe0, 0x10, - 0x51, 0x01, 0x9e, 0x15, 0x46, 0x81, 0x08, 0xf0, 0xab, 0x19, 0xda, 0xa2, 0x21, 0xb3, 0x8a, 0x68, - 0x6b, 0xd2, 0xdd, 0xbd, 0x37, 0x62, 0xe2, 0x34, 0x19, 0x5a, 0x6e, 0xe0, 0xdb, 0xa3, 0x60, 0x14, - 0xd8, 0xaa, 0x68, 0x98, 0x9c, 0xa8, 0x2f, 0xf5, 0xa1, 0x7e, 0x65, 0x64, 0xbb, 0xef, 0xe4, 0xad, - 0x7d, 0xea, 0x9e, 0x32, 0x0e, 0xd1, 0xd4, 0x0e, 0xc7, 0x23, 0x19, 0x88, 0x6d, 0x1f, 0x04, 0xbd, - 0x62, 0x84, 0x5d, 0xfb, 0xba, 0xaa, 0x28, 0xe1, 0x82, 0xf9, 0xb0, 0x54, 0xf0, 0xee, 0x4d, 0x05, - 0xb1, 0x7b, 0x0a, 0x3e, 0x5d, 0xac, 0x33, 0x7f, 0x33, 0xd0, 0xff, 0x9d, 0x20, 0xe1, 0xde, 0xa3, - 0xe1, 0xd7, 0xe0, 0x0a, 0x02, 0x27, 0x10, 0x01, 0x77, 0x01, 0xef, 0xa1, 0xea, 0x98, 0x71, 0xaf, - 0x65, 0xec, 0x19, 0xfb, 0x0d, 0xe7, 0xd6, 0xd9, 0xac, 0x53, 0x4a, 0x67, 0x9d, 0xea, 0x27, 0x8c, - 0x7b, 0x44, 0x65, 0x70, 0x0f, 0x21, 0x1a, 0xb2, 0x63, 0x88, 0x62, 0x16, 0xf0, 0x56, 0x59, 0xe1, - 0xb0, 0xc6, 0xa1, 0x83, 0xc7, 0x03, 0x9d, 0x21, 0x73, 0x28, 0xc9, 0xca, 0xa9, 0x0f, 0xad, 0x4a, - 0x91, 0xf5, 0x33, 0xea, 0x03, 0x51, 0x19, 0xec, 0xa0, 0x4a, 0x32, 0x38, 0x6c, 0x55, 0x15, 0xe0, - 0xbe, 0x06, 0x54, 0x8e, 0x06, 0x87, 0xff, 0xcc, 0x3a, 0xaf, 0x5f, 0xb7, 0xa4, 0x98, 0x86, 0x10, - 0x5b, 0x47, 0x83, 0x43, 0x22, 0x8b, 0xcd, 0xf7, 0x10, 0xea, 0x3f, 0x13, 0x11, 0x3d, 0xa6, 0xdf, - 0x24, 0x80, 0x3b, 0xa8, 0xc6, 0x04, 0xf8, 0x71, 0xcb, 0xd8, 0xab, 0xec, 0x37, 0x9c, 0x46, 0x3a, - 0xeb, 0xd4, 0x06, 0x32, 0x40, 0xb2, 0xf8, 0xc3, 0xfa, 0x0f, 0x3f, 0x77, 0x4a, 0xcf, 0x7f, 0xdf, - 0x2b, 0x99, 0x3f, 0x95, 0xd1, 0xad, 0xa7, 0xc1, 0x18, 0x38, 0x81, 0x6f, 0x13, 0x88, 0x05, 0xfe, - 0x0a, 0xd5, 0xe5, 0x11, 0x79, 0x54, 0x50, 0xa5, 0x44, 0xb3, 0x77, 0xdf, 0xca, 0xdd, 0x71, 0x39, - 0x84, 0x15, 0x8e, 0x47, 0x32, 0x10, 0x5b, 0x12, 0x6d, 0x4d, 0xba, 0x56, 0x26, 0xe7, 0xa7, 0x20, - 0x68, 0xae, 0x49, 0x1e, 0x23, 0x97, 0xac, 0xf8, 0x31, 0xaa, 0xc6, 0x21, 0xb8, 0x4a, 0xbf, 0x66, - 0xcf, 0xb2, 0x56, 0x79, 0xcf, 0x9a, 0x9f, 0xed, 0x49, 0x08, 0x6e, 0xae, 0xa0, 0xfc, 0x22, 0x8a, - 0x09, 0x7f, 0x81, 0x36, 0x62, 0x41, 0x45, 0x12, 0x2b, 0x95, 0x8b, 0x13, 0xdf, 0xc4, 0xa9, 0xea, - 0x9c, 0x2d, 0xcd, 0xba, 0x91, 0x7d, 0x13, 0xcd, 0x67, 0xfe, 0x6d, 0xa0, 0xed, 0xc5, 0x11, 0xf0, - 0x5b, 0xa8, 0x41, 0x13, 0x8f, 0x49, 0xd3, 0x5c, 0x48, 0xbc, 0x99, 0xce, 0x3a, 0x8d, 0x83, 0x8b, - 0x20, 0xc9, 0xf3, 0xf8, 0x43, 0xb4, 0x03, 0xcf, 0x42, 0x16, 0xa9, 0xee, 0x4f, 0xc0, 0x0d, 0xb8, - 0x17, 0xab, 0xb3, 0xae, 0x38, 0x77, 0xd2, 0x59, 0x67, 0xa7, 0xbf, 0x98, 0x24, 0xcb, 0x78, 0xcc, - 0xd1, 0xd6, 0xb0, 0x60, 0x59, 0xbd, 0x68, 0x6f, 0xf5, 0xa2, 0x57, 0xd9, 0xdc, 0xc1, 0xe9, 0xac, - 0xb3, 0x55, 0xcc, 0x90, 0x05, 0x76, 0xf3, 0x17, 0x03, 0xe1, 0x65, 0x95, 0xf0, 0x5d, 0x54, 0x13, - 0x32, 0xaa, 0xff, 0x22, 0x9b, 0x5a, 0xb4, 0x5a, 0x06, 0xcd, 0x72, 0x78, 0x8a, 0x6e, 0xe7, 0x0b, - 0x3c, 0x65, 0x3e, 0xc4, 0x82, 0xfa, 0xa1, 0x3e, 0xed, 0x37, 0xd7, 0xf3, 0x92, 0x2c, 0x73, 0x5e, - 0xd1, 0xf4, 0xb7, 0xfb, 0xcb, 0x74, 0xe4, 0xaa, 0x1e, 0xe6, 0x8f, 0x65, 
0xd4, 0xd4, 0x63, 0x4f, - 0x18, 0x7c, 0xf7, 0x1f, 0x78, 0xf9, 0x51, 0xc1, 0xcb, 0xf7, 0xd6, 0xf2, 0x9d, 0x1c, 0xed, 0x5a, - 0x2b, 0x7f, 0xbe, 0x60, 0x65, 0x7b, 0x7d, 0xca, 0xd5, 0x4e, 0x76, 0xd1, 0xff, 0x16, 0xfa, 0xaf, - 0x77, 0x9c, 0x05, 0xb3, 0x97, 0x57, 0x9b, 0xdd, 0xfc, 0xcb, 0x40, 0x3b, 0x4b, 0x23, 0xe1, 0xf7, - 0xd1, 0xe6, 0xdc, 0xe4, 0x90, 0xdd, 0xb0, 0x75, 0xe7, 0x8e, 0xee, 0xb7, 0x79, 0x30, 0x9f, 0x24, - 0x45, 0x2c, 0xfe, 0x18, 0x55, 0x93, 0x18, 0x22, 0xad, 0xf0, 0x1b, 0xab, 0xe5, 0x38, 0x8a, 0x21, - 0x1a, 0xf0, 0x93, 0x20, 0x97, 0x56, 0x46, 0x88, 0x62, 0x28, 0x6e, 0x52, 0xbd, 0xe1, 0x6f, 0x7b, - 0x17, 0xd5, 0x20, 0x8a, 0x82, 0x48, 0xdf, 0xdb, 0x97, 0xda, 0xf4, 0x65, 0x90, 0x64, 0x39, 0xf3, - 0xd7, 0x32, 0xaa, 0x5f, 0xb4, 0xc4, 0x6f, 0xa3, 0xba, 0x6c, 0xa3, 0x2e, 0xfb, 0x4c, 0xd0, 0x6d, - 0x5d, 0xa4, 0x30, 0x32, 0x4e, 0x2e, 0x11, 0xf8, 0x35, 0x54, 0x49, 0x98, 0xa7, 0xdf, 0x90, 0xe6, - 0xdc, 0xa5, 0x4f, 0x64, 0x1c, 0x9b, 0x68, 0x63, 0x14, 0x05, 0x49, 0x28, 0x6d, 0x20, 0x07, 0x45, - 0xf2, 0x44, 0x3f, 0x52, 0x11, 0xa2, 0x33, 0xf8, 0x18, 0xd5, 0x40, 0xde, 0xf9, 0x6a, 0x97, 0x66, - 0xaf, 0xbb, 0x9e, 0x34, 0x96, 0x7a, 0x27, 0xfa, 0x5c, 0x44, 0xd3, 0xb9, 0xad, 0x64, 0x8c, 0x64, - 0x74, 0xbb, 0x43, 0xfd, 0x96, 0x28, 0x0c, 0xde, 0x46, 0x95, 0x31, 0x4c, 0xb3, 0x8d, 0x88, 0xfc, - 0x89, 0x3f, 0x40, 0xb5, 0x89, 0x7c, 0x66, 0xf4, 0x91, 0xec, 0xaf, 0xee, 0x9b, 0x3f, 0x4b, 0x24, - 0x2b, 0x7b, 0x58, 0x7e, 0x60, 0x38, 0xfb, 0x67, 0xe7, 0xed, 0xd2, 0x8b, 0xf3, 0x76, 0xe9, 0xe5, - 0x79, 0xbb, 0xf4, 0x3c, 0x6d, 0x1b, 0x67, 0x69, 0xdb, 0x78, 0x91, 0xb6, 0x8d, 0x97, 0x69, 0xdb, - 0xf8, 0x23, 0x6d, 0x1b, 0xdf, 0xff, 0xd9, 0x2e, 0x7d, 0x59, 0x9e, 0x74, 0xff, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x51, 0xcc, 0x53, 0x28, 0xe2, 0x08, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authentication/v1/generated.proto", fileDescriptor_d1237cbf54dccd53) +} + +var fileDescriptor_d1237cbf54dccd53 = []byte{ + // 947 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0xc5, + 0x13, 0xf7, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0xfe, 0x49, 0xef, 0x7f, 0x85, 0x15, 0x16, 0x4f, 0x98, + 0x95, 0x50, 0x04, 0xbb, 0x33, 0x1b, 0x8b, 0xc7, 0x6a, 0x91, 0x90, 0x32, 0xc4, 0x02, 0x0b, 0xc1, + 0xae, 0xda, 0x49, 0x40, 0x48, 0x48, 0xb4, 0xc7, 0x1d, 0xa7, 0xf1, 0xce, 0x83, 0x99, 0x1e, 0xb3, + 0xbe, 0xed, 0x47, 0xe0, 0x08, 0x12, 0x07, 0x3e, 0x04, 0x12, 0x5f, 0x21, 0xc7, 0x15, 0xe2, 0xb0, + 0x07, 0x64, 0x91, 0xe1, 0xca, 0x91, 0x13, 0x27, 0xd4, 0x3d, 0x1d, 0xdb, 0x63, 0x27, 0x13, 0x9f, + 0xf6, 0xe6, 0xa9, 0xc7, 0xaf, 0xaa, 0x7e, 0x55, 0x5d, 0x65, 0xb8, 0x3b, 0x7c, 0x10, 0x99, 0xcc, + 0xb7, 0x48, 0xc0, 0x2c, 0x12, 0xf3, 0x53, 0xea, 0x71, 0xe6, 0x10, 0xce, 0x7c, 0xcf, 0x1a, 0xed, + 0x59, 0x03, 0xea, 0xd1, 0x90, 0x70, 0xda, 0x37, 0x83, 0xd0, 0xe7, 0x3e, 0xba, 0x9d, 0x5a, 0x9b, + 0x24, 0x60, 0x66, 0xd6, 0xda, 0x1c, 0xed, 0x6d, 0xdf, 0x1b, 0x30, 0x7e, 0x1a, 0xf7, 0x4c, 0xc7, + 0x77, 0xad, 0x81, 0x3f, 0xf0, 0x2d, 0xe9, 0xd4, 0x8b, 0x4f, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x52, + 0xb0, 0xed, 0xb7, 0x67, 0xa1, 0x5d, 0xe2, 0x9c, 0x32, 0x8f, 0x86, 0x63, 0x2b, 0x18, 0x0e, 0x84, + 0x20, 0xb2, 0x5c, 0xca, 0xc9, 0x25, 0x29, 0x6c, 0x5b, 0x57, 0x79, 0x85, 0xb1, 0xc7, 0x99, 0x4b, + 0x97, 0x1c, 0xde, 0xbd, 0xce, 0x21, 0x72, 0x4e, 0xa9, 0x4b, 0x16, 0xfd, 0x8c, 0xdf, 0x34, 0xf8, + 0xbf, 0xed, 0xc7, 0x5e, 0xff, 0x51, 0xef, 0x1b, 0xea, 0x70, 0x4c, 0x4f, 0x68, 0x48, 0x3d, 0x87, + 0xa2, 0x1d, 0x28, 0x0f, 0x99, 0xd7, 0x6f, 0x68, 0x3b, 0xda, 0x6e, 0xcd, 0xbe, 0x71, 0x36, 0xd1, + 0x0b, 0xc9, 0x44, 0x2f, 0x7f, 
0xc2, 0xbc, 0x3e, 0x96, 0x1a, 0xd4, 0x02, 0x20, 0x01, 0x3b, 0xa6, + 0x61, 0xc4, 0x7c, 0xaf, 0x51, 0x94, 0x76, 0x48, 0xd9, 0xc1, 0xfe, 0xe3, 0x8e, 0xd2, 0xe0, 0x39, + 0x2b, 0x81, 0xea, 0x11, 0x97, 0x36, 0x4a, 0x59, 0xd4, 0xcf, 0x88, 0x4b, 0xb1, 0xd4, 0x20, 0x1b, + 0x4a, 0x71, 0xe7, 0xa0, 0x51, 0x96, 0x06, 0xf7, 0x95, 0x41, 0xe9, 0xa8, 0x73, 0xf0, 0xef, 0x44, + 0x7f, 0xfd, 0xaa, 0x22, 0xf9, 0x38, 0xa0, 0x91, 0x79, 0xd4, 0x39, 0xc0, 0xc2, 0xd9, 0x78, 0x0f, + 0xa0, 0xfd, 0x94, 0x87, 0xe4, 0x98, 0x3c, 0x89, 0x29, 0xd2, 0xa1, 0xc2, 0x38, 0x75, 0xa3, 0x86, + 0xb6, 0x53, 0xda, 0xad, 0xd9, 0xb5, 0x64, 0xa2, 0x57, 0x3a, 0x42, 0x80, 0x53, 0xf9, 0xc3, 0xea, + 0x0f, 0x3f, 0xeb, 0x85, 0x67, 0x7f, 0xec, 0x14, 0x8c, 0xdf, 0x35, 0xd8, 0xea, 0xd2, 0x27, 0x27, + 0xdd, 0x58, 0xb1, 0x31, 0x62, 0xf4, 0x3b, 0xf4, 0x35, 0x54, 0x45, 0x9f, 0xfa, 0x84, 0x13, 0x49, + 0x47, 0xbd, 0x75, 0xdf, 0x9c, 0x8d, 0xc8, 0x34, 0x13, 0x33, 0x18, 0x0e, 0x84, 0x20, 0x32, 0x85, + 0xb5, 0x39, 0xda, 0x33, 0x53, 0x4e, 0x3f, 0xa5, 0x9c, 0xcc, 0x88, 0x99, 0xc9, 0xf0, 0x14, 0x15, + 0x7d, 0x05, 0x6b, 0x11, 0x27, 0x3c, 0x8e, 0x24, 0x8d, 0xf5, 0xd6, 0x3b, 0x66, 0xde, 0x08, 0x9a, + 0x4b, 0x29, 0x76, 0xa5, 0xb3, 0xbd, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x58, 0x81, 0x1a, 0x3e, 0xbc, + 0x72, 0x85, 0x0b, 0x3a, 0x84, 0x6a, 0x1c, 0xd1, 0xb0, 0xe3, 0x9d, 0xf8, 0xaa, 0xb6, 0x37, 0xf2, + 0x63, 0x1f, 0x29, 0x6b, 0x7b, 0x53, 0x05, 0xab, 0x5e, 0x48, 0xf0, 0x14, 0xc9, 0xf8, 0xa9, 0x08, + 0x37, 0x0e, 0xfd, 0x21, 0xf5, 0x30, 0xfd, 0x36, 0xa6, 0x11, 0x7f, 0x09, 0x14, 0x3e, 0x86, 0x72, + 0x14, 0x50, 0x47, 0x11, 0x68, 0xe6, 0x17, 0x31, 0x9f, 0x5b, 0x37, 0xa0, 0xce, 0x6c, 0x12, 0xc5, + 0x17, 0x96, 0x48, 0xe8, 0x8b, 0x69, 0x53, 0x4a, 0x4b, 0x19, 0x5f, 0x87, 0x99, 0xdf, 0x8f, 0x7f, + 0x34, 0xd8, 0x5c, 0x4c, 0x01, 0xbd, 0x05, 0x35, 0x12, 0xf7, 0x99, 0x78, 0x7c, 0x17, 0xa3, 0xba, + 0x9e, 0x4c, 0xf4, 0xda, 0xfe, 0x85, 0x10, 0xcf, 0xf4, 0xe8, 0x43, 0xd8, 0xa2, 0x4f, 0x03, 0x16, + 0xca, 0xe8, 0x5d, 0xea, 0xf8, 0x5e, 0x3f, 0x92, 0x6f, 0xa6, 0x64, 0xdf, 0x4a, 0x26, 0xfa, 0x56, + 0x7b, 0x51, 0x89, 0x97, 0xed, 0x91, 0x07, 0x1b, 0xbd, 0xcc, 0xd3, 0x57, 0x85, 0xb6, 0xf2, 0x0b, + 0xbd, 0x6c, 0x5d, 0xd8, 0x28, 0x99, 0xe8, 0x1b, 0x59, 0x0d, 0x5e, 0x40, 0x37, 0x7e, 0xd1, 0x00, + 0x2d, 0xb3, 0x84, 0xee, 0x40, 0x85, 0x0b, 0xa9, 0x5a, 0x35, 0xeb, 0x8a, 0xb4, 0x4a, 0x6a, 0x9a, + 0xea, 0xd0, 0x18, 0x6e, 0xce, 0x0a, 0x38, 0x64, 0x2e, 0x8d, 0x38, 0x71, 0x03, 0xd5, 0xed, 0x37, + 0x57, 0x9b, 0x25, 0xe1, 0x66, 0xbf, 0xaa, 0xe0, 0x6f, 0xb6, 0x97, 0xe1, 0xf0, 0x65, 0x31, 0x8c, + 0x1f, 0x8b, 0x50, 0x57, 0x69, 0xbf, 0xa4, 0x75, 0xf0, 0x28, 0x33, 0xcb, 0xf7, 0x56, 0x9a, 0x3b, + 0xf9, 0xa6, 0xaf, 0x1a, 0xe5, 0xcf, 0x17, 0x46, 0xd9, 0x5a, 0x1d, 0x32, 0x7f, 0x92, 0x1d, 0xf8, + 0xdf, 0x42, 0xfc, 0xd5, 0xda, 0x99, 0x19, 0xf6, 0x62, 0xfe, 0xb0, 0x1b, 0x7f, 0x6b, 0xb0, 0xb5, + 0x94, 0x12, 0x7a, 0x1f, 0xd6, 0xe7, 0x32, 0xa7, 0xe9, 0xa5, 0xaa, 0xda, 0xb7, 0x54, 0xbc, 0xf5, + 0xfd, 0x79, 0x25, 0xce, 0xda, 0xa2, 0x8f, 0xa1, 0x2c, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37, + 0xa5, 0x56, 0x48, 0xb0, 0x44, 0xc8, 0x56, 0x52, 0xbe, 0xe6, 0xd9, 0xde, 0x81, 0x0a, 0x0d, 0x43, + 0x3f, 0x54, 0xf7, 0x6f, 0xca, 0x4d, 0x5b, 0x08, 0x71, 0xaa, 0x33, 0x7e, 0x2d, 0xc2, 0x74, 0xa7, + 0xa2, 0xbb, 0xe9, 0x7e, 0x96, 0x47, 0x33, 0x25, 0x34, 0xb3, 0x77, 0x85, 0x1c, 0x4f, 0x2d, 0xd0, + 0x6b, 0x50, 0x8a, 0x59, 0x5f, 0xdd, 0xe2, 0xfa, 0xdc, 0xf1, 0xc4, 0x42, 0x8e, 0x0c, 0x58, 0x1b, + 0x84, 0x7e, 0x1c, 0x88, 0x31, 0x10, 0x89, 0x82, 0xe8, 0xe8, 0x47, 0x52, 0x82, 0x95, 0x06, 0x1d, + 0x43, 0x85, 0x8a, 0xdb, 0x29, 0x6b, 0xa9, 0xb7, 0xf6, 
0x56, 0xa3, 0xc6, 0x94, 0xf7, 0xb6, 0xed, + 0xf1, 0x70, 0x3c, 0x57, 0x95, 0x90, 0xe1, 0x14, 0x6e, 0xbb, 0xa7, 0x6e, 0xb2, 0xb4, 0x41, 0x9b, + 0x50, 0x1a, 0xd2, 0x71, 0x5a, 0x11, 0x16, 0x3f, 0xd1, 0x07, 0x50, 0x19, 0x89, 0x73, 0xad, 0x5a, + 0xb2, 0x9b, 0x1f, 0x77, 0x76, 0xde, 0x71, 0xea, 0xf6, 0xb0, 0xf8, 0x40, 0xb3, 0xed, 0xb3, 0xf3, + 0x66, 0xe1, 0xf9, 0x79, 0xb3, 0xf0, 0xe2, 0xbc, 0x59, 0x78, 0x96, 0x34, 0xb5, 0xb3, 0xa4, 0xa9, + 0x3d, 0x4f, 0x9a, 0xda, 0x8b, 0xa4, 0xa9, 0xfd, 0x99, 0x34, 0xb5, 0xef, 0xff, 0x6a, 0x16, 0xbe, + 0xbc, 0x9d, 0xf7, 0x67, 0xf0, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xb7, 0xc1, 0xa0, 0x2b, + 0x0a, 0x00, 0x00, } func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { @@ -451,6 +512,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -850,6 +987,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenRequest) Size() (n int) { if m == nil { return 0 @@ -999,6 +1160,27 @@ func (this *BoundObjectReference) String() string { }, "") return s } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenRequest) String() string { if this == nil { return "nil" @@ -1361,6 +1543,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index 8f928be40..ae9763576 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -26,7 +26,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/authentication/v1"; // BoundObjectReference is a reference to an object that a token is bound to. message BoundObjectReference { @@ -56,12 +56,32 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional UserInfo userInfo = 1; +} + // TokenRequest requests a token for a given service account. message TokenRequest { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenRequestSpec spec = 2; @@ -74,11 +94,12 @@ message TokenRequest { // TokenRequestSpec contains client provided parameters of a token request. message TokenRequestSpec { // Audiences are the intendend audiences of the token. A recipient of a - // token must identitfy themself with an identifier in the list of + // token must identify themself with an identifier in the list of // audiences of the token, and otherwise should reject the token. A // token issued for multiple audiences may be used to authenticate // against any of the audiences listed but implies a high degree of // trust between the target audiences. + // +listType=atomic repeated string audiences = 1; // ExpirationSeconds is the requested duration of validity of the request. The @@ -102,7 +123,7 @@ message TokenRequestStatus { optional string token = 1; // ExpirationTimestamp is the time of expiration of the returned token. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; } // TokenReview attempts to authenticate a token to a known user. @@ -112,7 +133,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; @@ -134,6 +155,7 @@ message TokenReviewSpec { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic repeated string audiences = 2; } @@ -157,6 +179,7 @@ message TokenReviewStatus { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. // +optional + // +listType=atomic repeated string audiences = 4; // Error indicates that the token couldn't be checked @@ -179,6 +202,7 @@ message UserInfo { // The names of groups this user is a part of. // +optional + // +listType=atomic repeated string groups = 3; // Any additional information provided by the authenticator. 
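For context on the authentication/v1 hunks above: this vendoring pulls in the new SelfSubjectReview type, a create-only resource whose status the kube-apiserver fills in with the caller's user attributes. The sketch below is illustrative only and is not part of the vendored change; it assumes the client-go version pinned in go.mod exposes AuthenticationV1().SelfSubjectReviews(), and the whoAmI helper name and the rest.Config wiring are placeholders supplied by the caller.

package whoami

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// whoAmI creates an empty SelfSubjectReview; the server populates Status
// with the authenticated caller's user info. cfg is assumed to come from
// whatever config loading the caller already does (kubeconfig, in-cluster, ...).
func whoAmI(ctx context.Context, cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	review, err := clientset.AuthenticationV1().SelfSubjectReviews().Create(
		ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("user=%s groups=%v\n",
		review.Status.UserInfo.Username, review.Status.UserInfo.Groups)
	return nil
}

This is roughly the request that kubectl auth whoami issues against the same endpoint.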
diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go index c522e4a46..6a32b5926 100644 --- a/vendor/k8s.io/api/authentication/v1/register.go +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -46,6 +46,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, &TokenRequest{}, + &SelfSubjectReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 7ba247d63..2dc0707c4 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -45,6 +45,7 @@ const ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator @@ -75,6 +76,7 @@ type TokenReviewSpec struct { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } @@ -96,6 +98,7 @@ type TokenReviewStatus struct { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. // +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional @@ -115,6 +118,7 @@ type UserInfo struct { UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` // The names of groups this user is a part of. // +optional + // +listType=atomic Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` // Any additional information provided by the authenticator. // +optional @@ -131,6 +135,7 @@ func (t ExtraValue) String() string { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // TokenRequest requests a token for a given service account. type TokenRequest struct { @@ -151,11 +156,12 @@ type TokenRequest struct { // TokenRequestSpec contains client provided parameters of a token request. type TokenRequestSpec struct { // Audiences are the intendend audiences of the token. A recipient of a - // token must identitfy themself with an identifier in the list of + // token must identify themself with an identifier in the list of // audiences of the token, and otherwise should reject the token. A // token issued for multiple audiences may be used to authenticate // against any of the audiences listed but implies a high degree of // trust between the target audiences. + // +listType=atomic Audiences []string `json:"audiences" protobuf:"bytes,1,rep,name=audiences"` // ExpirationSeconds is the requested duration of validity of the request. 
The @@ -197,3 +203,29 @@ type BoundObjectReference struct { // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"` } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index f9a88a3df..ebfd4852c 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_BoundObjectReference = map[string]string{ @@ -39,6 +39,25 @@ func (BoundObjectReference) SwaggerDoc() map[string]string { return map_BoundObjectReference } +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenRequest = map[string]string{ "": "TokenRequest requests a token for a given service account.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -52,7 +71,7 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_TokenRequestSpec = map[string]string{ "": "TokenRequestSpec contains client provided parameters of a token request.", - "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", + "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", } diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index 2af533191..369c89b86 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -61,6 +61,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b612bdec4 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *TokenRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/doc.go b/vendor/k8s.io/api/authentication/v1alpha1/doc.go new file mode 100644 index 000000000..eb32def90 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +package v1alpha1 // import "k8s.io/api/authentication/v1alpha1" diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go b/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go new file mode 100644 index 000000000..98c106ec6 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go @@ -0,0 +1,566 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/authentication/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_f003acd72d3d5efb, []int{0} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f003acd72d3d5efb, []int{1} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1alpha1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1alpha1.SelfSubjectReviewStatus") +} + +func init() { + proto.RegisterFile("k8s.io/api/authentication/v1alpha1/generated.proto", fileDescriptor_f003acd72d3d5efb) +} + +var fileDescriptor_f003acd72d3d5efb = []byte{ + // 368 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x4f, 0xe2, 0x40, + 0x14, 0xc7, 0x3b, 0x7b, 0x20, 0xa4, 0x9b, 0x6c, 0x76, 0x7b, 0x59, 0xc2, 0x61, 0x30, 0x3d, 0x18, + 0x0f, 0x3a, 0x23, 0xc4, 0x18, 0x13, 0x6f, 0x3d, 0xe9, 0xc1, 0x98, 0x14, 0xbd, 0x78, 0xf2, 0x51, + 0x1e, 0xed, 0x08, 0xed, 0x34, 0xed, 0x14, 0xe3, 0xcd, 0x8f, 0xe0, 0xc7, 0xe2, 0xc8, 0x91, 0x78, + 0x20, 0x52, 0xbf, 0x88, 0xe9, 0x50, 0x20, 0x82, 0xc0, 0xad, 0xef, 0xe5, 0xfd, 0x7e, 0xef, 0xdf, + 0x99, 0x31, 0x5b, 0xfd, 0x8b, 0x94, 0x09, 0xc9, 0x21, 0x16, 0x1c, 0x32, 0x15, 0x60, 0xa4, 0x84, + 0x07, 0x4a, 0xc8, 0x88, 0x0f, 0x9b, 0x30, 0x88, 0x03, 0x68, 0x72, 0x1f, 0x23, 0x4c, 0x40, 0x61, + 0x97, 0xc5, 0x89, 0x54, 0xd2, 0xb2, 0xe7, 0x0c, 0x83, 0x58, 0xb0, 0xef, 0x0c, 0x5b, 0x30, 0xf5, + 0x13, 0x5f, 0xa8, 0x20, 0xeb, 0x30, 0x4f, 0x86, 0xdc, 0x97, 0xbe, 0xe4, 0x1a, 0xed, 0x64, 0x3d, + 0x5d, 0xe9, 0x42, 0x7f, 0xcd, 0x95, 0xf5, 0xe3, 0x5d, 0x31, 0xd6, 0x03, 0xd4, 0xcf, 0x56, 0xd3, + 0x21, 0x78, 0x81, 0x88, 0x30, 0x79, 0xe1, 0x71, 0xdf, 0x2f, 0x1a, 0x29, 0x0f, 0x51, 0xc1, 0x4f, + 0x14, 0xdf, 0x46, 0x25, 0x59, 0xa4, 0x44, 0x88, 0x1b, 0xc0, 0xf9, 0x3e, 0x20, 0xf5, 0x02, 0x0c, + 0x61, 0x9d, 0xb3, 0xdf, 0x89, 0xf9, 0xaf, 0x8d, 0x83, 0x5e, 0x3b, 0xeb, 0x3c, 0xa1, 0xa7, 0x5c, + 0x1c, 0x0a, 0x7c, 0xb6, 0x1e, 0xcd, 0x6a, 0x91, 0xac, 0x0b, 0x0a, 0x6a, 0xe4, 0x80, 0x1c, 0xfd, + 0x6e, 0x9d, 0xb2, 0xd5, 0x41, 0x2e, 0x17, 0xb0, 0xb8, 0xef, 0x17, 0x8d, 0x94, 0x15, 0xd3, 0x6c, + 0xd8, 0x64, 0xb7, 0xda, 0x72, 0x83, 0x0a, 0x1c, 0x6b, 0x34, 0x6d, 0x18, 0xf9, 0xb4, 0x61, 0xae, + 0x7a, 0xee, 0xd2, 0x6a, 0x79, 0x66, 0x25, 0x55, 0xa0, 0xb2, 0xb4, 0xf6, 0x4b, 0xfb, 0x2f, 0xd9, + 0xfe, 0x8b, 0x62, 0x1b, 0x41, 0xdb, 0x5a, 0xe1, 0xfc, 0x29, 0x57, 0x55, 0xe6, 0xb5, 0x5b, 0xaa, + 0x6d, 0x69, 0xfe, 0xdf, 0x82, 0x58, 0x77, 0x66, 0x35, 0x4b, 0x31, 0xb9, 0x8e, 0x7a, 0xb2, 0xfc, + 0xc3, 0xc3, 0x9d, 0x09, 0xd8, 0x7d, 0x39, 0xed, 0xfc, 0x2d, 0x97, 0x55, 0x17, 0x1d, 0x77, 0x69, + 0x72, 0xae, 0x46, 0x33, 0x6a, 0x8c, 0x67, 0xd4, 0x98, 0xcc, 0xa8, 0xf1, 0x9a, 0x53, 0x32, 0xca, + 0x29, 0x19, 0xe7, 0x94, 0x4c, 0x72, 0x4a, 0x3e, 0x72, 0x4a, 0xde, 0x3e, 0xa9, 0xf1, 0x60, 0xef, + 0x7f, 0xc7, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x04, 0xfb, 0xb6, 0xfb, 0xec, 0x02, 0x00, 0x00, +} + +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto new file mode 100644 index 000000000..4585e5cdd --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -0,0 +1,51 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.authentication.v1alpha1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/authentication/v1alpha1"; + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; +} + diff --git a/vendor/k8s.io/api/authentication/v1alpha1/register.go b/vendor/k8s.io/api/authentication/v1alpha1/register.go new file mode 100644 index 000000000..4f93928d9 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types.go b/vendor/k8s.io/api/authentication/v1alpha1/types.go new file mode 100644 index 000000000..1ee3612fb --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/types.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. + // +optional + UserInfo v1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..1ffcc99e7 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,49 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..9b146a847 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. 
+func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..62a70a781 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 3d8f76515..415392644 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto +// source: k8s.io/api/authentication/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{0} + return fileDescriptor_fdc2de40fd7f3b21, []int{0} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -72,10 +72,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} } +func (*SelfSubjectReview) ProtoMessage() {} +func (*SelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_fdc2de40fd7f3b21, []int{1} +} +func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReview.Merge(m, src) +} +func (m *SelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo + +func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} } +func (*SelfSubjectReviewStatus) ProtoMessage() {} +func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_fdc2de40fd7f3b21, []int{2} +} +func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src) +} +func (m *SelfSubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo + func (m *TokenReview) Reset() { *m = TokenReview{} } func (*TokenReview) ProtoMessage() {} func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{1} + return fileDescriptor_fdc2de40fd7f3b21, []int{3} } func (m *TokenReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +159,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } func (*TokenReviewSpec) ProtoMessage() {} func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{2} + return fileDescriptor_fdc2de40fd7f3b21, []int{4} } func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +187,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } func (*TokenReviewStatus) ProtoMessage() {} func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptor_77c9b20d3ad27844, []int{3} + return fileDescriptor_fdc2de40fd7f3b21, []int{5} } func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +215,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo func (m *UserInfo) Reset() { *m = UserInfo{} } func (*UserInfo) ProtoMessage() {} func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{4} + return fileDescriptor_fdc2de40fd7f3b21, []int{6} } func (m *UserInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,6 +242,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReview") + proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReviewStatus") proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") @@ -194,53 +252,56 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) -} - -var fileDescriptor_77c9b20d3ad27844 = []byte{ - // 663 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xb6, 0xf3, 0x53, 0x92, 0x0d, 0x81, 0xb2, 0x12, 0x52, 0x14, 0x09, 0xa7, 0x84, 0x4b, 0xa5, - 0xd2, 0x35, 0xad, 0xaa, 0x52, 0x95, 0x53, 0x0d, 0x15, 0x2a, 0x52, 0x85, 0xb4, 0xb4, 0x1c, 0x80, - 0x03, 0x1b, 0x67, 0xea, 0x98, 0xe0, 0x1f, 0xad, 0xd7, 0x81, 0xde, 0xfa, 0x08, 0x1c, 0x39, 0x22, - 0xf1, 0x24, 0xdc, 0x7a, 0xec, 0xb1, 0x07, 0x14, 0x51, 0xf3, 0x04, 0xbc, 0x01, 0xda, 0xf5, 0xb6, - 0x4e, 0x1b, 0x41, 0xdb, 0x9b, 0xf7, 0x9b, 0xf9, 0xbe, 0x99, 0xf9, 0xc6, 0x83, 0x5e, 0x0c, 0xd7, - 0x12, 0xe2, 0x47, 0xf6, 0x30, 0xed, 0x01, 0x0f, 0x41, 0x40, 0x62, 0x8f, 0x20, 0xec, 0x47, 0xdc, - 0xd6, 0x01, 0x16, 0xfb, 0x36, 0x4b, 0xc5, 0x00, 0x42, 0xe1, 0xbb, 0x4c, 0xf8, 0x51, 0x68, 0x8f, - 0x96, 0x7a, 0x20, 0xd8, 0x92, 0xed, 0x41, 0x08, 0x9c, 0x09, 0xe8, 0x93, 0x98, 0x47, 0x22, 0xc2, - 0xf7, 0x73, 0x0a, 0x61, 0xb1, 0x4f, 0xce, 0x53, 0x88, 0xa6, 0xb4, 0x17, 0x3d, 0x5f, 0x0c, 0xd2, - 0x1e, 0x71, 0xa3, 0xc0, 0xf6, 0x22, 0x2f, 0xb2, 0x15, 0xb3, 0x97, 0xee, 0xa9, 0x97, 0x7a, 0xa8, - 0xaf, 0x5c, 0xb1, 0xbd, 0x52, 0x34, 0x11, 0x30, 0x77, 0xe0, 0x87, 0xc0, 0xf7, 0xed, 0x78, 0xe8, - 0x49, 0x20, 0xb1, 0x03, 0x10, 0xcc, 0x1e, 0x4d, 0xf5, 0xd1, 0xb6, 0xff, 0xc5, 0xe2, 0x69, 0x28, - 0xfc, 0x00, 0xa6, 0x08, 0xab, 0x97, 0x11, 0x12, 0x77, 0x00, 0x01, 0xbb, 0xc8, 0xeb, 0x3e, 0x46, - 0x68, 0xf3, 0xb3, 0xe0, 0xec, 0x35, 0xfb, 0x98, 0x02, 0xee, 0xa0, 0xaa, 0x2f, 0x20, 0x48, 0x5a, - 0xe6, 0x5c, 0x79, 0xbe, 0xee, 0xd4, 0xb3, 0x71, 0xa7, 0xba, 0x25, 0x01, 0x9a, 0xe3, 0xeb, 0xb5, - 0xaf, 0xdf, 0x3a, 0xc6, 0xc1, 0xcf, 0x39, 0xa3, 0xfb, 0xbd, 0x84, 0x1a, 0x3b, 0xd1, 0x10, 0x42, - 0x0a, 0x23, 0x1f, 0x3e, 0xe1, 0xf7, 0xa8, 0x26, 0x87, 0xe9, 0x33, 0xc1, 0x5a, 0xe6, 0x9c, 0x39, - 0xdf, 0x58, 0x7e, 0x44, 0x0a, 0x33, 0xcf, 0x7a, 0x22, 0xf1, 0xd0, 0x93, 0x40, 0x42, 0x64, 0x36, - 0x19, 0x2d, 0x91, 0x97, 0xbd, 0x0f, 0xe0, 0x8a, 0x6d, 0x10, 0xcc, 0xc1, 0x87, 0xe3, 0x8e, 0x91, - 0x8d, 0x3b, 0xa8, 0xc0, 0xe8, 0x99, 0x2a, 0xde, 0x41, 0x95, 
0x24, 0x06, 0xb7, 0x55, 0x52, 0xea, - 0xcb, 0xe4, 0xd2, 0x55, 0x91, 0x89, 0xfe, 0x5e, 0xc5, 0xe0, 0x3a, 0x37, 0xb5, 0x7e, 0x45, 0xbe, - 0xa8, 0x52, 0xc3, 0xef, 0xd0, 0x4c, 0x22, 0x98, 0x48, 0x93, 0x56, 0x59, 0xe9, 0xae, 0x5c, 0x53, - 0x57, 0x71, 0x9d, 0x5b, 0x5a, 0x79, 0x26, 0x7f, 0x53, 0xad, 0xd9, 0x75, 0xd1, 0xed, 0x0b, 0x4d, - 0xe0, 0x07, 0xa8, 0x2a, 0x24, 0xa4, 0x5c, 0xaa, 0x3b, 0x4d, 0xcd, 0xac, 0xe6, 0x79, 0x79, 0x0c, - 0x2f, 0xa0, 0x3a, 0x4b, 0xfb, 0x3e, 0x84, 0x2e, 0x24, 0xad, 0x92, 0x5a, 0x46, 0x33, 0x1b, 0x77, - 0xea, 0x1b, 0xa7, 0x20, 0x2d, 0xe2, 0xdd, 0x3f, 0x26, 0xba, 0x33, 0xd5, 0x12, 0x7e, 0x82, 0x9a, - 0x13, 0xed, 0x43, 0x5f, 0xd5, 0xab, 0x39, 0x77, 0x75, 0xbd, 0xe6, 0xc6, 0x64, 0x90, 0x9e, 0xcf, - 0xc5, 0xdb, 0xa8, 0x92, 0x26, 0xc0, 0xb5, 0xd7, 0x0b, 0x57, 0xf0, 0x64, 0x37, 0x01, 0xbe, 0x15, - 0xee, 0x45, 0x85, 0xc9, 0x12, 0xa1, 0x4a, 0xe6, 0xfc, 0x38, 0x95, 0xff, 0x8f, 0x23, 0x0d, 0x02, - 0xce, 0x23, 0xae, 0x16, 0x32, 0x61, 0xd0, 0xa6, 0x04, 0x69, 0x1e, 0xeb, 0xfe, 0x28, 0xa1, 0xda, - 0x69, 0x49, 0xfc, 0x10, 0xd5, 0x64, 0x99, 0x90, 0x05, 0xa0, 0x5d, 0x9d, 0xd5, 0x24, 0x95, 0x23, - 0x71, 0x7a, 0x96, 0x81, 0xef, 0xa1, 0x72, 0xea, 0xf7, 0xd5, 0x68, 0x75, 0xa7, 0xa1, 0x13, 0xcb, - 0xbb, 0x5b, 0xcf, 0xa8, 0xc4, 0x71, 0x17, 0xcd, 0x78, 0x3c, 0x4a, 0x63, 0xf9, 0x43, 0xc8, 0x46, - 0x91, 0x5c, 0xeb, 0x73, 0x85, 0x50, 0x1d, 0xc1, 0x6f, 0x51, 0x15, 0xe4, 0xd5, 0xa8, 0x59, 0x1a, - 0xcb, 0xab, 0xd7, 0xf0, 0x87, 0xa8, 0x73, 0xdb, 0x0c, 0x05, 0xdf, 0x9f, 0x18, 0x4d, 0x62, 0x34, - 0xd7, 0x6c, 0x7b, 0xfa, 0x24, 0x55, 0x0e, 0x9e, 0x45, 0xe5, 0x21, 0xec, 0xe7, 0x63, 0x51, 0xf9, - 0x89, 0x9f, 0xa2, 0xea, 0x48, 0x5e, 0xab, 0x5e, 0xce, 0xe2, 0x15, 0x8a, 0x17, 0x27, 0x4e, 0x73, - 0xee, 0x7a, 0x69, 0xcd, 0x74, 0x16, 0x0f, 0x4f, 0x2c, 0xe3, 0xe8, 0xc4, 0x32, 0x8e, 0x4f, 0x2c, - 0xe3, 0x20, 0xb3, 0xcc, 0xc3, 0xcc, 0x32, 0x8f, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x57, 0x66, - 0x99, 0x5f, 0x7e, 0x5b, 0xc6, 0x9b, 0x1b, 0x5a, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, - 0xbb, 0x89, 0x53, 0x68, 0x05, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_fdc2de40fd7f3b21) +} + +var fileDescriptor_fdc2de40fd7f3b21 = []byte{ + // 711 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x4e, 0xdb, 0x4e, + 0x10, 0x8e, 0xf3, 0x07, 0x25, 0x9b, 0x5f, 0x7e, 0x85, 0x95, 0xaa, 0xa2, 0x48, 0x75, 0x20, 0x95, + 0x2a, 0x24, 0x60, 0xdd, 0x20, 0x44, 0x11, 0x3d, 0xe1, 0x16, 0x21, 0x0e, 0xa8, 0xd2, 0x06, 0x7a, + 0x68, 0x7b, 0xe8, 0xc6, 0x19, 0x1c, 0x37, 0xc4, 0xb6, 0xec, 0x75, 0x5a, 0x6e, 0x3c, 0x42, 0x8f, + 0x3d, 0x56, 0xea, 0x93, 0xf4, 0xc6, 0x91, 0x23, 0x95, 0xaa, 0xa8, 0xb8, 0x4f, 0xd0, 0x37, 0xa8, + 0x76, 0xbd, 0x38, 0x09, 0x94, 0x00, 0x97, 0xde, 0xbc, 0xdf, 0xce, 0xf7, 0xcd, 0xcc, 0x37, 0xa3, + 0x35, 0x6a, 0x74, 0xd7, 0x43, 0xe2, 0x78, 0x06, 0xf3, 0x1d, 0x83, 0x45, 0xbc, 0x03, 0x2e, 0x77, + 0x2c, 0xc6, 0x1d, 0xcf, 0x35, 0xfa, 0x8d, 0x16, 0x70, 0xd6, 0x30, 0x6c, 0x70, 0x21, 0x60, 0x1c, + 0xda, 0xc4, 0x0f, 0x3c, 0xee, 0xe1, 0xf9, 0x84, 0x42, 0x98, 0xef, 0x90, 0x71, 0x0a, 0x51, 0x94, + 0xea, 0xb2, 0xed, 0xf0, 0x4e, 0xd4, 0x22, 0x96, 0xd7, 0x33, 0x6c, 0xcf, 0xf6, 0x0c, 0xc9, 0x6c, + 0x45, 0x07, 0xf2, 0x24, 0x0f, 0xf2, 0x2b, 0x51, 0xac, 0x2e, 0x4d, 0x2a, 0xe2, 0x72, 0xfe, 0xea, + 0xea, 0x30, 0xba, 0xc7, 0xac, 0x8e, 0xe3, 0x42, 0x70, 0x64, 0xf8, 0x5d, 0x5b, 0x00, 0xa1, 0xd1, + 0x03, 0xce, 0xfe, 0xc6, 0x32, 0xae, 0x63, 0x05, 0x91, 0xcb, 0x9d, 0x1e, 0x5c, 0x21, 0xac, 0xdd, + 0x44, 0x08, 0xad, 0x0e, 0xf4, 
0xd8, 0x65, 0x5e, 0xfd, 0x29, 0x42, 0x5b, 0x1f, 0x79, 0xc0, 0x5e, + 0xb1, 0xc3, 0x08, 0x70, 0x0d, 0x15, 0x1c, 0x0e, 0xbd, 0x70, 0x56, 0x9b, 0xcb, 0x2d, 0x94, 0xcc, + 0x52, 0x3c, 0xa8, 0x15, 0x76, 0x04, 0x40, 0x13, 0x7c, 0xa3, 0xf8, 0xf9, 0x4b, 0x2d, 0x73, 0xfc, + 0x63, 0x2e, 0x53, 0xff, 0xae, 0xa1, 0x99, 0x26, 0x1c, 0x1e, 0x34, 0xa3, 0xd6, 0x7b, 0xb0, 0x38, + 0x85, 0xbe, 0x03, 0x1f, 0xf0, 0x3b, 0x54, 0x14, 0x2d, 0xb5, 0x19, 0x67, 0xb3, 0xda, 0x9c, 0xb6, + 0x50, 0x5e, 0x79, 0x42, 0x86, 0x03, 0x48, 0x2b, 0x23, 0x7e, 0xd7, 0x16, 0x40, 0x48, 0x44, 0x34, + 0xe9, 0x37, 0xc8, 0x4b, 0xa9, 0xb2, 0x0b, 0x9c, 0x99, 0xf8, 0x64, 0x50, 0xcb, 0xc4, 0x83, 0x1a, + 0x1a, 0x62, 0x34, 0x55, 0xc5, 0x2d, 0x34, 0x15, 0x72, 0xc6, 0xa3, 0x70, 0x36, 0x2b, 0xf5, 0x37, + 0xc8, 0x8d, 0x03, 0x26, 0x57, 0xea, 0x6c, 0x4a, 0x05, 0xf3, 0x7f, 0x95, 0x69, 0x2a, 0x39, 0x53, + 0xa5, 0x5c, 0xf7, 0xd0, 0x83, 0x6b, 0x28, 0x78, 0x0f, 0x15, 0xa3, 0x10, 0x82, 0x1d, 0xf7, 0xc0, + 0x53, 0x0d, 0x3e, 0x9e, 0x58, 0x00, 0xd9, 0x57, 0xd1, 0xe6, 0xb4, 0x4a, 0x56, 0xbc, 0x40, 0x68, + 0xaa, 0x54, 0xff, 0x9a, 0x45, 0xe5, 0x3d, 0xaf, 0x0b, 0xee, 0x3f, 0xb3, 0x71, 0x0f, 0xe5, 0x43, + 0x1f, 0x2c, 0x65, 0xe2, 0xca, 0x2d, 0x4c, 0x1c, 0xa9, 0xaf, 0xe9, 0x83, 0x65, 0xfe, 0xa7, 0xf4, + 0xf3, 0xe2, 0x44, 0xa5, 0x1a, 0x7e, 0x9b, 0x0e, 0x27, 0x27, 0x75, 0x57, 0xef, 0xa8, 0x3b, 0x79, + 0x2c, 0x16, 0xba, 0x77, 0xa9, 0x08, 0xfc, 0x08, 0x15, 0xb8, 0x80, 0xa4, 0x4b, 0x25, 0xb3, 0xa2, + 0x98, 0x85, 0x24, 0x2e, 0xb9, 0xc3, 0x8b, 0xa8, 0xc4, 0xa2, 0xb6, 0x03, 0xae, 0x05, 0x62, 0x6b, + 0xc4, 0x66, 0x57, 0xe2, 0x41, 0xad, 0xb4, 0x79, 0x01, 0xd2, 0xe1, 0x7d, 0xfd, 0xb7, 0x86, 0x66, + 0xae, 0x94, 0x84, 0x9f, 0xa1, 0xca, 0x48, 0xf9, 0xd0, 0x96, 0xf9, 0x8a, 0xe6, 0x7d, 0x95, 0xaf, + 0xb2, 0x39, 0x7a, 0x49, 0xc7, 0x63, 0xf1, 0x2e, 0xca, 0x8b, 0x49, 0x2b, 0xaf, 0x17, 0x6f, 0xe1, + 0x49, 0xba, 0x34, 0xa9, 0xc9, 0x02, 0xa1, 0x52, 0x66, 0xbc, 0x9d, 0xfc, 0xe4, 0x76, 0x84, 0x41, + 0x10, 0x04, 0x5e, 0x20, 0x07, 0x32, 0x62, 0xd0, 0x96, 0x00, 0x69, 0x72, 0x57, 0xff, 0x96, 0x45, + 0xe9, 0x56, 0xe2, 0xa5, 0x64, 0xc3, 0x5d, 0xd6, 0x03, 0xe5, 0xea, 0xd8, 0xe6, 0x0a, 0x9c, 0xa6, + 0x11, 0xf8, 0x21, 0xca, 0x45, 0x4e, 0x5b, 0xb6, 0x56, 0x32, 0xcb, 0x2a, 0x30, 0xb7, 0xbf, 0xf3, + 0x82, 0x0a, 0x1c, 0xd7, 0xd1, 0x94, 0x1d, 0x78, 0x91, 0x2f, 0x16, 0x42, 0x14, 0x8a, 0xc4, 0x58, + 0xb7, 0x25, 0x42, 0xd5, 0x0d, 0x7e, 0x83, 0x0a, 0x20, 0x9e, 0x20, 0xd9, 0x4b, 0x79, 0x65, 0xed, + 0x0e, 0xfe, 0x10, 0xf9, 0x76, 0x6d, 0xb9, 0x3c, 0x38, 0x1a, 0x69, 0x4d, 0x60, 0x34, 0xd1, 0xac, + 0xda, 0xea, 0x7d, 0x93, 0x31, 0x78, 0x1a, 0xe5, 0xba, 0x70, 0x94, 0xb4, 0x45, 0xc5, 0x27, 0x7e, + 0x8e, 0x0a, 0x7d, 0xf1, 0xf4, 0xa9, 0xe1, 0x2c, 0xdf, 0x22, 0xf9, 0xf0, 0xbd, 0xa4, 0x09, 0x77, + 0x23, 0xbb, 0xae, 0x99, 0xdb, 0x27, 0xe7, 0x7a, 0xe6, 0xf4, 0x5c, 0xcf, 0x9c, 0x9d, 0xeb, 0x99, + 0xe3, 0x58, 0xd7, 0x4e, 0x62, 0x5d, 0x3b, 0x8d, 0x75, 0xed, 0x2c, 0xd6, 0xb5, 0x9f, 0xb1, 0xae, + 0x7d, 0xfa, 0xa5, 0x67, 0x5e, 0xcf, 0xdf, 0xf8, 0x03, 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0x45, + 0x72, 0x2b, 0xf2, 0xe4, 0x06, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -275,6 +336,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -517,6 +654,30 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *SelfSubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *TokenReview) Size() (n int) { if m == nil { return 0 @@ -603,6 +764,27 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *SelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectReviewStatus{`, + `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *TokenReview) String() string { if this == nil { return "nil" @@ -752,6 +934,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
SelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TokenReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index 67a32b320..d0f6fe440 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -21,12 +21,13 @@ syntax = "proto2"; package k8s.io.api.authentication.v1beta1; +import "k8s.io/api/authentication/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/authentication/v1beta1"; // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true @@ -37,6 +38,26 @@ message ExtraValue { repeated string items = 1; } +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +message SelfSubjectReview { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Status is filled in by the server with the user attributes. + optional SelfSubjectReviewStatus status = 2; +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +message SelfSubjectReviewStatus { + // User attributes of the user making this request. + // +optional + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; +} + // TokenReview attempts to authenticate a token to a known user. // Note: TokenReview requests may be cached by the webhook token authenticator // plugin in the kube-apiserver. @@ -44,7 +65,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; @@ -66,6 +87,7 @@ message TokenReviewSpec { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic repeated string audiences = 2; } @@ -89,6 +111,7 @@ message TokenReviewStatus { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. // +optional + // +listType=atomic repeated string audiences = 4; // Error indicates that the token couldn't be checked @@ -111,6 +134,7 @@ message UserInfo { // The names of groups this user is a part of. // +optional + // +listType=atomic repeated string groups = 3; // Any additional information provided by the authenticator. 
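For reference, a minimal sketch of how the newly vendored SelfSubjectReview type and its generated protobuf codec fit together, assuming only the k8s.io/api and k8s.io/apimachinery versions pinned in go.mod; the user name, groups, and object name are placeholders, and the round trip exercises the Marshal/Unmarshal functions added in generated.pb.go above:

package main

import (
	"fmt"

	authnv1 "k8s.io/api/authentication/v1"
	authnv1beta1 "k8s.io/api/authentication/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Build a SelfSubjectReview the way the kube-apiserver would return it:
	// Status.UserInfo carries the attributes of the authenticated caller.
	// Names and groups here are illustrative placeholders.
	in := &authnv1beta1.SelfSubjectReview{
		ObjectMeta: metav1.ObjectMeta{Name: "whoami"},
		Status: authnv1beta1.SelfSubjectReviewStatus{
			UserInfo: authnv1.UserInfo{
				Username: "jane",
				Groups:   []string{"system:authenticated"},
			},
		},
	}

	// Round-trip the object through the generated protobuf codec
	// (Marshal / MarshalToSizedBuffer / Unmarshal from generated.pb.go).
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	out := &authnv1beta1.SelfSubjectReview{}
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}

	fmt.Println(out.Status.UserInfo.Username, out.Status.UserInfo.Groups)
	// Output: jane [system:authenticated]
}

Against a live cluster the object is create-only (see the +genclient:onlyVerbs=create tag) and would be submitted through a typed client, e.g. client-go's AuthenticationV1beta1().SelfSubjectReviews().Create in the client-go version pinned here; the server, not the caller, populates Status.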
diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index ed23e50f7..075ee1263 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -44,6 +44,7 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectReview{}, &TokenReview{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 08e1e09b6..8038ef7d3 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -59,6 +60,7 @@ type TokenReviewSpec struct { // this list. If no audiences are provided, the audience will default to the // audience of the Kubernetes apiserver. // +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } @@ -80,6 +82,7 @@ type TokenReviewStatus struct { // status.audience field where status.authenticated is "true", the token is // valid against the audience of the Kubernetes API server. // +optional + // +listType=atomic Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional @@ -99,6 +102,7 @@ type UserInfo struct { UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` // The names of groups this user is a part of. // +optional + // +listType=atomic Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` // Any additional information provided by the authenticator. // +optional @@ -113,3 +117,29 @@ type ExtraValue []string func (t ExtraValue) String() string { return fmt.Sprintf("%v", []string(t)) } + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. +// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or +// request header authentication is used, any extra keys will have their case ignored and returned as lowercase. +type SelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Status is filled in by the server with the user attributes. + Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user. +type SelfSubjectReviewStatus struct { + // User attributes of the user making this request. 
+ // +optional + UserInfo v1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"` +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 1086955c3..d6644f2cf 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,28 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_SelfSubjectReview = map[string]string{ + "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Status is filled in by the server with the user attributes.", +} + +func (SelfSubjectReview) SwaggerDoc() map[string]string { + return map_SelfSubjectReview +} + +var map_SelfSubjectReviewStatus = map[string]string{ + "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "userInfo": "User attributes of the user making this request.", +} + +func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string { + return map_SelfSubjectReviewStatus +} + var map_TokenReview = map[string]string{ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 059ec1a86..99ffadf7b 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -45,6 +45,50 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview. +func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview { + if in == nil { + return nil + } + out := new(SelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) { + *out = *in + in.UserInfo.DeepCopyInto(&out.UserInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus. +func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus { + if in == nil { + return nil + } + out := new(SelfSubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go index e448106e4..904796925 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,24 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *SelfSubjectReview) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *SelfSubjectReview) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index cf100e6b7..77e5a19c4 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=authorization.k8s.io package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index 66c7c06ae..aed9a3a47 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto +// source: k8s.io/api/authorization/v1/generated.proto package v1 @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -47,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{0} + return fileDescriptor_aafd0e5e70cec678, []int{0} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -72,10 +73,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *FieldSelectorAttributes) Reset() { *m = FieldSelectorAttributes{} } +func (*FieldSelectorAttributes) ProtoMessage() {} +func (*FieldSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{1} +} +func (m *FieldSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorAttributes.Merge(m, src) +} +func (m *FieldSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorAttributes proto.InternalMessageInfo + +func (m *LabelSelectorAttributes) Reset() { *m = LabelSelectorAttributes{} } +func (*LabelSelectorAttributes) ProtoMessage() {} +func (*LabelSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{2} +} +func (m *LabelSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorAttributes.Merge(m, src) +} +func (m *LabelSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorAttributes proto.InternalMessageInfo + func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() 
([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{1} + return fileDescriptor_aafd0e5e70cec678, []int{3} } func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +160,7 @@ var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } func (*NonResourceAttributes) ProtoMessage() {} func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{2} + return fileDescriptor_aafd0e5e70cec678, []int{4} } func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +188,7 @@ var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } func (*NonResourceRule) ProtoMessage() {} func (*NonResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{3} + return fileDescriptor_aafd0e5e70cec678, []int{5} } func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +216,7 @@ var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (*ResourceAttributes) ProtoMessage() {} func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{4} + return fileDescriptor_aafd0e5e70cec678, []int{6} } func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +244,7 @@ var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo func (m *ResourceRule) Reset() { *m = ResourceRule{} } func (*ResourceRule) ProtoMessage() {} func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{5} + return fileDescriptor_aafd0e5e70cec678, []int{7} } func (m *ResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +272,7 @@ var xxx_messageInfo_ResourceRule proto.InternalMessageInfo func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (*SelfSubjectAccessReview) ProtoMessage() {} func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{6} + return fileDescriptor_aafd0e5e70cec678, []int{8} } func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +300,7 @@ var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{7} + return fileDescriptor_aafd0e5e70cec678, []int{9} } func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +328,7 @@ var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } func (*SelfSubjectRulesReview) ProtoMessage() {} func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{8} + return fileDescriptor_aafd0e5e70cec678, []int{10} } func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +356,7 @@ var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } 
func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{9} + return fileDescriptor_aafd0e5e70cec678, []int{11} } func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +384,7 @@ var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (*SubjectAccessReview) ProtoMessage() {} func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{10} + return fileDescriptor_aafd0e5e70cec678, []int{12} } func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +412,7 @@ var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{11} + return fileDescriptor_aafd0e5e70cec678, []int{13} } func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +440,7 @@ var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{12} + return fileDescriptor_aafd0e5e70cec678, []int{14} } func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +468,7 @@ var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{13} + return fileDescriptor_aafd0e5e70cec678, []int{15} } func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,6 +495,8 @@ var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") + proto.RegisterType((*FieldSelectorAttributes)(nil), "k8s.io.api.authorization.v1.FieldSelectorAttributes") + proto.RegisterType((*LabelSelectorAttributes)(nil), "k8s.io.api.authorization.v1.LabelSelectorAttributes") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview") proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes") proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule") @@ -455,83 +514,89 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) -} - -var fileDescriptor_e50da13573e369bd = []byte{ - // 1140 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xda, 0xd1, - 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, - 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 
0xde, 0x5d, 0x66, 0x66, 0x1d, - 0xc2, 0xa9, 0x12, 0xff, 0x00, 0x47, 0x0e, 0x1c, 0xf8, 0x0f, 0xb8, 0x20, 0x71, 0xe3, 0xc0, 0x01, - 0xe5, 0xd8, 0x63, 0x91, 0x90, 0x45, 0x96, 0x33, 0xff, 0x03, 0x9a, 0xd9, 0xb1, 0x77, 0x9d, 0xac, - 0xdd, 0x84, 0x03, 0xbd, 0xf4, 0xb6, 0xfb, 0x3e, 0x9f, 0xf7, 0xe6, 0xcd, 0xfb, 0x35, 0x0f, 0x6c, - 0x1f, 0x6d, 0x32, 0xcb, 0xf5, 0xed, 0xa3, 0xb0, 0x49, 0xa8, 0x47, 0x38, 0x61, 0x76, 0x9f, 0x78, - 0x2d, 0x9f, 0xda, 0x0a, 0xc0, 0x81, 0x6b, 0xe3, 0x90, 0x77, 0x7c, 0xea, 0x7e, 0x8d, 0xb9, 0xeb, - 0x7b, 0x76, 0x7f, 0xdd, 0x6e, 0x13, 0x8f, 0x50, 0xcc, 0x49, 0xcb, 0x0a, 0xa8, 0xcf, 0x7d, 0x78, - 0x2b, 0x26, 0x5b, 0x38, 0x70, 0xad, 0x31, 0xb2, 0xd5, 0x5f, 0x5f, 0x79, 0xaf, 0xed, 0xf2, 0x4e, - 0xd8, 0xb4, 0x1c, 0xbf, 0x67, 0xb7, 0xfd, 0xb6, 0x6f, 0x4b, 0x9d, 0x66, 0x78, 0x28, 0xff, 0xe4, - 0x8f, 0xfc, 0x8a, 0x6d, 0xad, 0xdc, 0x4d, 0x0e, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x9e, 0xd8, - 0xc1, 0x51, 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x78, 0xb0, 0x62, 0x4f, 0xd2, 0xa2, 0xa1, - 0xc7, 0xdd, 0x1e, 0xb9, 0xa0, 0x70, 0xff, 0x55, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xaf, 0x67, - 0x6e, 0x00, 0xb0, 0xf3, 0x15, 0xa7, 0xf8, 0x00, 0x77, 0x43, 0x02, 0x6b, 0xa0, 0xe8, 0x72, 0xd2, - 0x63, 0x86, 0xb6, 0x9a, 0x5f, 0x2b, 0x37, 0xca, 0xd1, 0xa0, 0x56, 0xdc, 0x15, 0x02, 0x14, 0xcb, - 0x1f, 0x94, 0xbe, 0xfb, 0xa1, 0x96, 0x7b, 0xfe, 0xc7, 0x6a, 0xce, 0xfc, 0x49, 0x07, 0xc6, 0x23, - 0xdf, 0xc1, 0xdd, 0xbd, 0xb0, 0xf9, 0x05, 0x71, 0xf8, 0x96, 0xe3, 0x10, 0xc6, 0x10, 0xe9, 0xbb, - 0xe4, 0x18, 0x7e, 0x0e, 0x4a, 0xe2, 0x66, 0x2d, 0xcc, 0xb1, 0xa1, 0xad, 0x6a, 0x6b, 0x95, 0xfa, - 0xfb, 0x56, 0x12, 0xd3, 0x91, 0x83, 0x56, 0x70, 0xd4, 0x16, 0x02, 0x66, 0x09, 0xb6, 0xd5, 0x5f, - 0xb7, 0x3e, 0x92, 0xb6, 0x1e, 0x13, 0x8e, 0x1b, 0xf0, 0x74, 0x50, 0xcb, 0x45, 0x83, 0x1a, 0x48, - 0x64, 0x68, 0x64, 0x15, 0x1e, 0x80, 0x02, 0x0b, 0x88, 0x63, 0xe8, 0xd2, 0xfa, 0x5d, 0x6b, 0x4a, - 0xc6, 0xac, 0x0c, 0x0f, 0xf7, 0x02, 0xe2, 0x34, 0xae, 0xa9, 0x13, 0x0a, 0xe2, 0x0f, 0x49, 0x7b, - 0xf0, 0x33, 0x30, 0xc3, 0x38, 0xe6, 0x21, 0x33, 0xf2, 0xd2, 0xf2, 0xfd, 0x2b, 0x5b, 0x96, 0xda, - 0x8d, 0xff, 0x29, 0xdb, 0x33, 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc0, 0xd2, 0x13, 0xdf, 0x43, - 0x84, 0xf9, 0x21, 0x75, 0xc8, 0x16, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x0a, 0x0a, 0x01, - 0xe6, 0x1d, 0x19, 0xae, 0x72, 0xe2, 0xda, 0x53, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, - 0x29, 0xaf, 0x9c, 0x62, 0x1c, 0x10, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x04, 0xf3, 0x29, 0xe3, 0x28, - 0xec, 0xca, 0x8c, 0x0a, 0x68, 0x2c, 0xa3, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x82, 0x79, 0x2f, - 0xd1, 0xd9, 0x47, 0x8f, 0x98, 0xa1, 0x4b, 0xea, 0x62, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, - 0xe7, 0x9a, 0xbf, 0xe8, 0x00, 0x66, 0xdc, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, - 0xd4, 0x95, 0xae, 0x2b, 0x87, 0xcb, 0x4f, 0x86, 0x00, 0x4a, 0x38, 0xaf, 0xbe, 0x1c, 0x7c, 0x0b, - 0x14, 0xdb, 0xd4, 0x0f, 0x03, 0x99, 0x98, 0x72, 0x63, 0x4e, 0x51, 0x8a, 0x1f, 0x0a, 0x21, 0x8a, - 0x31, 0x78, 0x1b, 0xcc, 0xf6, 0x09, 0x65, 0xae, 0xef, 0x19, 0x05, 0x49, 0x9b, 0x57, 0xb4, 0xd9, - 0x83, 0x58, 0x8c, 0x86, 0x38, 0xbc, 0x03, 0x4a, 0x54, 0x39, 0x6e, 0x14, 0x25, 0x77, 0x41, 0x71, - 0x4b, 0xa3, 0x08, 0x8e, 0x18, 0xf0, 0x1e, 0xa8, 0xb0, 0xb0, 0x39, 0x52, 0x98, 0x91, 0x0a, 0x8b, - 0x4a, 0xa1, 0xb2, 0x97, 0x40, 0x28, 0xcd, 0x13, 0xd7, 0x12, 0x77, 0x34, 0x66, 0xc7, 0xaf, 0x25, - 0x42, 0x80, 0x24, 0x62, 0xfe, 0xaa, 0x81, 0x6b, 0x57, 0xcb, 0xd8, 0xbb, 0xa0, 0x8c, 0x03, 0x57, - 0x5e, 0x7b, 0x98, 0xab, 0x39, 0x11, 0xd7, 0xad, 0xa7, 0xbb, 0xb1, 0x10, 0x25, 0xb8, 0x20, 
0x0f, - 0x9d, 0x11, 0x25, 0x3d, 0x22, 0x0f, 0x8f, 0x64, 0x28, 0xc1, 0xe1, 0x06, 0x98, 0x1b, 0xfe, 0xc8, - 0x24, 0x19, 0x05, 0xa9, 0x70, 0x3d, 0x1a, 0xd4, 0xe6, 0x50, 0x1a, 0x40, 0xe3, 0x3c, 0xf3, 0x67, - 0x1d, 0x2c, 0xef, 0x91, 0xee, 0xe1, 0xeb, 0x99, 0x05, 0xcf, 0xc6, 0x66, 0xc1, 0xe6, 0xf4, 0x8e, - 0xcd, 0xf6, 0xf2, 0xb5, 0xcd, 0x83, 0xef, 0x75, 0x70, 0x6b, 0x8a, 0x4f, 0xf0, 0x18, 0x40, 0x7a, - 0xa1, 0xbd, 0x54, 0x1c, 0xed, 0xa9, 0xbe, 0x5c, 0xec, 0xca, 0xc6, 0x8d, 0x68, 0x50, 0xcb, 0xe8, - 0x56, 0x94, 0x71, 0x04, 0xfc, 0x46, 0x03, 0x4b, 0x5e, 0xd6, 0xa4, 0x52, 0x61, 0xae, 0x4f, 0x3d, - 0x3c, 0x73, 0xc6, 0x35, 0x6e, 0x46, 0x83, 0x5a, 0xf6, 0xf8, 0x43, 0xd9, 0x67, 0x89, 0x57, 0xe6, - 0x46, 0x2a, 0x3c, 0xa2, 0x41, 0xfe, 0xbb, 0xba, 0xfa, 0x78, 0xac, 0xae, 0x36, 0x2e, 0x5b, 0x57, - 0x29, 0x27, 0x27, 0x96, 0xd5, 0xa7, 0xe7, 0xca, 0xea, 0xde, 0x65, 0xca, 0x2a, 0x6d, 0x78, 0x7a, - 0x55, 0x3d, 0x06, 0x2b, 0x93, 0x1d, 0xba, 0xf2, 0x70, 0x36, 0x7f, 0xd4, 0xc1, 0xe2, 0x9b, 0x67, - 0xfe, 0x2a, 0x6d, 0xfd, 0x5b, 0x01, 0x2c, 0xbf, 0x69, 0xe9, 0x49, 0x8b, 0x4e, 0xc8, 0x08, 0x55, - 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x50, 0x24, 0x11, 0x68, 0x82, 0x99, 0x76, 0xfc, 0xba, 0xc5, - 0xef, 0x0f, 0x10, 0x01, 0x56, 0x4f, 0x9b, 0x42, 0x60, 0x0b, 0x14, 0x89, 0xd8, 0x5b, 0x8d, 0xe2, - 0x6a, 0x7e, 0xad, 0x52, 0xff, 0xe0, 0xdf, 0x54, 0x86, 0x25, 0x37, 0xdf, 0x1d, 0x8f, 0xd3, 0x93, - 0x64, 0x9d, 0x90, 0x32, 0x14, 0x1b, 0x87, 0xff, 0x07, 0xf9, 0xd0, 0x6d, 0xa9, 0xd7, 0xbe, 0xa2, - 0x28, 0xf9, 0xfd, 0xdd, 0x6d, 0x24, 0xe4, 0x2b, 0x58, 0x2d, 0xcf, 0xd2, 0x04, 0x5c, 0x00, 0xf9, - 0x23, 0x72, 0x12, 0x37, 0x14, 0x12, 0x9f, 0xf0, 0x21, 0x28, 0xf6, 0xc5, 0x5e, 0xad, 0xe2, 0xfb, - 0xce, 0x54, 0x27, 0x93, 0x35, 0x1c, 0xc5, 0x5a, 0x0f, 0xf4, 0x4d, 0xcd, 0xfc, 0x5d, 0x03, 0x37, - 0x27, 0x96, 0x9f, 0x58, 0x77, 0x70, 0xb7, 0xeb, 0x1f, 0x93, 0x96, 0x3c, 0xb6, 0x94, 0xac, 0x3b, - 0x5b, 0xb1, 0x18, 0x0d, 0x71, 0xf8, 0x36, 0x98, 0x69, 0x11, 0xcf, 0x25, 0x2d, 0xb9, 0x18, 0x95, - 0x92, 0xca, 0xdd, 0x96, 0x52, 0xa4, 0x50, 0xc1, 0xa3, 0x04, 0x33, 0xdf, 0x53, 0xab, 0xd8, 0x88, - 0x87, 0xa4, 0x14, 0x29, 0x14, 0x6e, 0x81, 0x79, 0x22, 0xdc, 0x94, 0xfe, 0xef, 0x50, 0xea, 0x0f, - 0x33, 0xba, 0xac, 0x14, 0xe6, 0x77, 0xc6, 0x61, 0x74, 0x9e, 0x6f, 0xfe, 0xad, 0x03, 0x63, 0xd2, - 0x68, 0x83, 0x87, 0xc9, 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2f, 0xd5, 0x20, 0x42, - 0xa3, 0xb1, 0xa4, 0x1c, 0x99, 0x4b, 0x4b, 0x53, 0xab, 0x8b, 0xfc, 0x85, 0x14, 0x2c, 0x78, 0xe3, - 0x3b, 0x73, 0xbc, 0x54, 0x55, 0xea, 0x77, 0x2e, 0xdb, 0x0e, 0xf2, 0x34, 0x43, 0x9d, 0xb6, 0x70, - 0x0e, 0x60, 0xe8, 0x82, 0x7d, 0x58, 0x07, 0xc0, 0xf5, 0x1c, 0xbf, 0x17, 0x74, 0x09, 0x27, 0x32, - 0x6c, 0xa5, 0x64, 0x0e, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xc5, 0xbb, 0x70, 0xb5, 0x78, 0x37, - 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, - 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, - 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x99, 0x87, 0xb8, 0x24, - 0x47, 0x0f, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authorization/v1/generated.proto", fileDescriptor_aafd0e5e70cec678) +} + +var fileDescriptor_aafd0e5e70cec678 = []byte{ + // 1247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xfa, 0x47, 0x62, 0x8f, 0xe3, 0x6f, 0xd2, 0xc9, 0x37, 0xcd, 0x36, 0x11, 0x76, 0x64, + 0x24, 0x48, 0xd5, 0xb2, 0x26, 0x51, 0xdb, 0x44, 0x95, 0x0a, 0xf2, 0xaa, 0x01, 0x45, 
0x4a, 0x4b, + 0x35, 0x51, 0x22, 0x51, 0x04, 0x62, 0xbc, 0x9e, 0xd8, 0x4b, 0xec, 0xdd, 0xed, 0xcc, 0xac, 0xd3, + 0x70, 0xaa, 0xc4, 0x3f, 0xc0, 0x91, 0x43, 0x0f, 0xfc, 0x07, 0x5c, 0x90, 0xb8, 0x73, 0x40, 0x11, + 0xa7, 0x1e, 0x8b, 0x84, 0x2c, 0x62, 0xce, 0xfc, 0x0f, 0x68, 0x66, 0xc7, 0xde, 0xdd, 0xc4, 0x76, + 0x6d, 0x0e, 0x94, 0x43, 0x6f, 0x9e, 0xf7, 0x79, 0xbf, 0xe7, 0xbd, 0xb7, 0x6f, 0x0c, 0x6e, 0x1c, + 0x6f, 0x33, 0xc3, 0x76, 0x2b, 0xd8, 0xb3, 0x2b, 0xd8, 0xe7, 0x4d, 0x97, 0xda, 0x5f, 0x63, 0x6e, + 0xbb, 0x4e, 0xa5, 0xb3, 0x51, 0x69, 0x10, 0x87, 0x50, 0xcc, 0x49, 0xdd, 0xf0, 0xa8, 0xcb, 0x5d, + 0xb8, 0x1a, 0x30, 0x1b, 0xd8, 0xb3, 0x8d, 0x18, 0xb3, 0xd1, 0xd9, 0x58, 0x79, 0xaf, 0x61, 0xf3, + 0xa6, 0x5f, 0x33, 0x2c, 0xb7, 0x5d, 0x69, 0xb8, 0x0d, 0xb7, 0x22, 0x65, 0x6a, 0xfe, 0x91, 0x3c, + 0xc9, 0x83, 0xfc, 0x15, 0xe8, 0x5a, 0xb9, 0x15, 0x1a, 0x6e, 0x63, 0xab, 0x69, 0x3b, 0x84, 0x9e, + 0x56, 0xbc, 0xe3, 0x86, 0x20, 0xb0, 0x4a, 0x9b, 0x70, 0x3c, 0xc4, 0x83, 0x95, 0xca, 0x28, 0x29, + 0xea, 0x3b, 0xdc, 0x6e, 0x93, 0x4b, 0x02, 0x77, 0x5e, 0x25, 0xc0, 0xac, 0x26, 0x69, 0xe3, 0x8b, + 0x72, 0xe5, 0x2d, 0x00, 0x76, 0x9e, 0x72, 0x8a, 0x0f, 0x71, 0xcb, 0x27, 0xb0, 0x04, 0x32, 0x36, + 0x27, 0x6d, 0xa6, 0x6b, 0x6b, 0xa9, 0xf5, 0x9c, 0x99, 0xeb, 0x75, 0x4b, 0x99, 0x5d, 0x41, 0x40, + 0x01, 0xfd, 0x6e, 0xf6, 0xbb, 0xef, 0x4b, 0x89, 0x67, 0xbf, 0xaf, 0x25, 0xca, 0xbf, 0x6a, 0x60, + 0xf9, 0x23, 0x9b, 0xb4, 0xea, 0xfb, 0xa4, 0x45, 0x2c, 0xee, 0xd2, 0x2a, 0xe7, 0xd4, 0xae, 0xf9, + 0x9c, 0x30, 0x78, 0x1b, 0xe4, 0x29, 0x3e, 0xe9, 0x03, 0xba, 0xb6, 0xa6, 0xad, 0xe7, 0xcc, 0xc5, + 0xb3, 0x6e, 0x29, 0xd1, 0xeb, 0x96, 0xf2, 0x28, 0x84, 0x50, 0x94, 0x0f, 0x3e, 0x05, 0x73, 0x94, + 0x3c, 0xf1, 0x6d, 0x4a, 0xda, 0xc4, 0xe1, 0x4c, 0x4f, 0xae, 0xa5, 0xd6, 0xf3, 0x9b, 0x1f, 0x18, + 0xe1, 0x6d, 0x0c, 0x42, 0x33, 0xbc, 0xe3, 0x86, 0x20, 0x30, 0x43, 0x64, 0xd0, 0xe8, 0x6c, 0x18, + 0x31, 0x5f, 0x50, 0xa8, 0xc6, 0xfc, 0xbf, 0xb2, 0x3b, 0x17, 0x21, 0x32, 0x14, 0xb3, 0x24, 0x83, + 0xd9, 0xc3, 0x35, 0xd2, 0xfa, 0x8f, 0x04, 0x13, 0xf3, 0x65, 0xda, 0x60, 0x7e, 0x4c, 0x02, 0x7d, + 0xcf, 0xb5, 0x70, 0x6b, 0xdf, 0xaf, 0x7d, 0x45, 0x2c, 0x5e, 0xb5, 0x2c, 0xc2, 0x18, 0x22, 0x1d, + 0x9b, 0x9c, 0xc0, 0x2f, 0x41, 0x56, 0x18, 0xa9, 0x63, 0x8e, 0x65, 0x28, 0xf9, 0xcd, 0xf7, 0x27, + 0x73, 0xe9, 0x13, 0xa9, 0xeb, 0x01, 0xe1, 0xd8, 0x84, 0xca, 0x09, 0x10, 0xd2, 0xd0, 0x40, 0x2b, + 0x3c, 0x04, 0x69, 0xe6, 0x11, 0x4b, 0x4f, 0x4a, 0xed, 0xb7, 0x8c, 0x31, 0xbd, 0x64, 0x0c, 0xf1, + 0x70, 0xdf, 0x23, 0x96, 0x39, 0xa7, 0x2c, 0xa4, 0xc5, 0x09, 0x49, 0x7d, 0xf0, 0x0b, 0x30, 0xc3, + 0x38, 0xe6, 0x3e, 0xd3, 0x53, 0x52, 0xf3, 0x9d, 0xa9, 0x35, 0x4b, 0x69, 0xf3, 0x7f, 0x4a, 0xf7, + 0x4c, 0x70, 0x46, 0x4a, 0x6b, 0xf9, 0x33, 0xb0, 0xf4, 0xd0, 0x75, 0x10, 0x61, 0xae, 0x4f, 0x2d, + 0x12, 0x29, 0x80, 0x35, 0x90, 0xf6, 0x30, 0x6f, 0xaa, 0x9b, 0x1f, 0xb8, 0xf6, 0x08, 0xf3, 0x26, + 0x92, 0x88, 0xe0, 0xe8, 0x10, 0x5a, 0x93, 0x21, 0x47, 0x38, 0x0e, 0x09, 0xad, 0x21, 0x89, 0x94, + 0x9f, 0x80, 0xf9, 0x88, 0x72, 0xe4, 0xb7, 0x64, 0xaf, 0x09, 0x28, 0xd6, 0x6b, 0x42, 0x82, 0xa1, + 0x80, 0x0e, 0xef, 0x81, 0x79, 0x27, 0x94, 0x39, 0x40, 0x7b, 0x41, 0x11, 0xe5, 0xcc, 0xc5, 0x5e, + 0xb7, 0x14, 0x55, 0x27, 0x20, 0x74, 0x91, 0xb7, 0xfc, 0x3c, 0x0d, 0xe0, 0x90, 0x68, 0x2a, 0x20, + 0xe7, 0xe0, 0x36, 0x61, 0x1e, 0xb6, 0x88, 0x0a, 0xe9, 0x8a, 0x72, 0x38, 0xf7, 0xb0, 0x0f, 0xa0, + 0x90, 0xe7, 0xd5, 0xc1, 0xc1, 0xb7, 0x41, 0xa6, 0x41, 0x5d, 0xdf, 0x93, 0x17, 0x93, 0x33, 0x0b, + 0x8a, 0x25, 0xf3, 0xb1, 0x20, 0xa2, 0x00, 0x83, 0xd7, 0xc1, 0x6c, 0x87, 0x50, 0x66, 0xbb, 0x8e, + 0x9e, 0x96, 
0x6c, 0xf3, 0x8a, 0x6d, 0xf6, 0x30, 0x20, 0xa3, 0x3e, 0x0e, 0x6f, 0x82, 0x2c, 0x55, + 0x8e, 0xeb, 0x19, 0xc9, 0xbb, 0xa0, 0x78, 0xb3, 0x83, 0x0c, 0x0e, 0x38, 0x44, 0x7f, 0x32, 0xbf, + 0x36, 0x10, 0x98, 0x89, 0xf7, 0xe7, 0x7e, 0x08, 0xa1, 0x28, 0x9f, 0x08, 0x4b, 0xc4, 0xa8, 0xcf, + 0xc6, 0xc3, 0x12, 0x29, 0x40, 0x12, 0x81, 0x6d, 0x50, 0x38, 0x8a, 0x0e, 0x15, 0x3d, 0x3b, 0x41, + 0x45, 0x8f, 0x18, 0x89, 0xe6, 0x95, 0x5e, 0xb7, 0x54, 0x88, 0xcf, 0xa8, 0xb8, 0x76, 0x61, 0xae, + 0x15, 0x6d, 0x7b, 0x3d, 0x37, 0x81, 0xb9, 0x11, 0x43, 0x2b, 0x30, 0x17, 0x9f, 0x22, 0x71, 0xed, + 0xe5, 0x9f, 0x35, 0x30, 0x37, 0x5d, 0x3d, 0xde, 0x00, 0x39, 0xec, 0xd9, 0xf2, 0x52, 0xfb, 0x95, + 0x58, 0x10, 0x55, 0x53, 0x7d, 0xb4, 0x1b, 0x10, 0x51, 0x88, 0x0b, 0xe6, 0x7e, 0xaa, 0x45, 0xc3, + 0x0e, 0x98, 0xfb, 0x26, 0x19, 0x0a, 0x71, 0xb8, 0x05, 0x0a, 0xfd, 0x83, 0x2c, 0x41, 0x3d, 0x2d, + 0x05, 0x64, 0x10, 0x28, 0x0a, 0xa0, 0x38, 0x5f, 0xf9, 0xa7, 0x24, 0x58, 0xde, 0x27, 0xad, 0xa3, + 0xd7, 0x33, 0xe9, 0x1e, 0xc7, 0x26, 0xdd, 0xf6, 0xf8, 0x79, 0x34, 0xdc, 0xcb, 0xd7, 0x36, 0xed, + 0x9e, 0x27, 0xc1, 0xea, 0x18, 0x9f, 0xe0, 0x09, 0x80, 0xf4, 0xd2, 0xf0, 0x50, 0x79, 0xac, 0x8c, + 0xf5, 0xe5, 0xf2, 0xcc, 0x31, 0xaf, 0xf6, 0xba, 0xa5, 0x21, 0xb3, 0x08, 0x0d, 0x31, 0x01, 0xbf, + 0xd1, 0xc0, 0x92, 0x33, 0x6c, 0x0e, 0xab, 0x34, 0x6f, 0x8e, 0x35, 0x3e, 0x74, 0x82, 0x9b, 0xd7, + 0x7a, 0xdd, 0xd2, 0xf0, 0xe1, 0x8e, 0x86, 0xdb, 0x12, 0xdf, 0xd0, 0xab, 0x91, 0xf4, 0x88, 0x06, + 0xf9, 0xf7, 0xea, 0xea, 0xd3, 0x58, 0x5d, 0x6d, 0x4d, 0x5a, 0x57, 0x11, 0x27, 0x47, 0x96, 0xd5, + 0xe7, 0x17, 0xca, 0xea, 0xf6, 0x24, 0x65, 0x15, 0x55, 0x3c, 0xbe, 0xaa, 0x1e, 0x80, 0x95, 0xd1, + 0x0e, 0x4d, 0xfd, 0xe9, 0x29, 0xff, 0x90, 0x04, 0x8b, 0x6f, 0x96, 0x98, 0x69, 0xda, 0xfa, 0x97, + 0x34, 0x58, 0x7e, 0xd3, 0xd2, 0xa3, 0xd6, 0x38, 0x9f, 0x11, 0xaa, 0x96, 0x94, 0xc1, 0xe5, 0x1c, + 0x30, 0x42, 0x91, 0x44, 0x60, 0x19, 0xcc, 0x34, 0x82, 0xaf, 0x5b, 0xf0, 0xfd, 0x01, 0x22, 0xc1, + 0xea, 0xd3, 0xa6, 0x10, 0x58, 0x07, 0x19, 0x22, 0xde, 0x4b, 0x7a, 0x46, 0xee, 0xf3, 0x1f, 0xfe, + 0x93, 0xca, 0x30, 0xe4, 0x8b, 0x6b, 0xc7, 0xe1, 0xf4, 0x34, 0x5c, 0x96, 0x24, 0x0d, 0x05, 0xca, + 0xe1, 0x5b, 0x20, 0xe5, 0xdb, 0x75, 0xb5, 0xcb, 0xe4, 0x15, 0x4b, 0xea, 0x60, 0xf7, 0x3e, 0x12, + 0xf4, 0x15, 0xac, 0x1e, 0x6d, 0x52, 0x05, 0x5c, 0x00, 0xa9, 0x63, 0x72, 0x1a, 0x34, 0x14, 0x12, + 0x3f, 0xe1, 0x3d, 0x90, 0xe9, 0x88, 0xf7, 0x9c, 0xca, 0xef, 0xbb, 0x63, 0x9d, 0x0c, 0x9f, 0x7f, + 0x28, 0x90, 0xba, 0x9b, 0xdc, 0xd6, 0xca, 0xbf, 0x69, 0xe0, 0xda, 0xc8, 0xf2, 0x13, 0xcb, 0x1c, + 0x6e, 0xb5, 0xdc, 0x13, 0x52, 0x97, 0x66, 0xb3, 0xe1, 0x32, 0x57, 0x0d, 0xc8, 0xa8, 0x8f, 0xc3, + 0x77, 0xc0, 0x4c, 0x9d, 0x38, 0x36, 0xa9, 0xcb, 0xb5, 0x2f, 0x1b, 0x56, 0xee, 0x7d, 0x49, 0x45, + 0x0a, 0x15, 0x7c, 0x94, 0x60, 0xe6, 0x3a, 0x6a, 0xd1, 0x1c, 0xf0, 0x21, 0x49, 0x45, 0x0a, 0x85, + 0x55, 0x30, 0x4f, 0x84, 0x9b, 0xd2, 0xff, 0x1d, 0x4a, 0xdd, 0xfe, 0x8d, 0x2e, 0x2b, 0x81, 0xf9, + 0x9d, 0x38, 0x8c, 0x2e, 0xf2, 0x97, 0xff, 0x4a, 0x02, 0x7d, 0xd4, 0x68, 0x83, 0x47, 0xe1, 0x2e, + 0x22, 0x41, 0xb9, 0x0e, 0xe5, 0x37, 0xaf, 0x4f, 0xd4, 0x20, 0x42, 0xc2, 0x5c, 0x52, 0x8e, 0x14, + 0xa2, 0xd4, 0xc8, 0xea, 0x22, 0x8f, 0x90, 0x82, 0x05, 0x27, 0xfe, 0x22, 0xe8, 0xbf, 0x11, 0x6f, + 0x4e, 0xda, 0x0e, 0xd2, 0x9a, 0xae, 0xac, 0x2d, 0x5c, 0x00, 0x18, 0xba, 0xa4, 0x1f, 0x6e, 0x02, + 0x60, 0x3b, 0x96, 0xdb, 0xf6, 0x5a, 0x84, 0x13, 0x99, 0xb6, 0x6c, 0x38, 0x07, 0x77, 0x07, 0x08, + 0x8a, 0x70, 0x0d, 0xcb, 0x77, 0x7a, 0xba, 0x7c, 0x9b, 0xd5, 0xb3, 0xf3, 0x62, 0xe2, 0xc5, 0x79, + 0x31, 0xf1, 0xf2, 0xbc, 0x98, 0x78, 
0xd6, 0x2b, 0x6a, 0x67, 0xbd, 0xa2, 0xf6, 0xa2, 0x57, 0xd4, + 0x5e, 0xf6, 0x8a, 0xda, 0x1f, 0xbd, 0xa2, 0xf6, 0xed, 0x9f, 0xc5, 0xc4, 0xe3, 0xd5, 0x31, 0xff, + 0xd0, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x8c, 0x77, 0x0f, 0xbf, 0x11, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -566,6 +631,90 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LabelSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -713,6 +862,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1278,6 +1451,40 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *FieldSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + 
return n +} + +func (m *LabelSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *LocalSubjectAccessReview) Size() (n int) { if m == nil { return 0 @@ -1347,6 +1554,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1537,6 +1752,38 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *FieldSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]FieldSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&FieldSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]LabelSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&LabelSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} func (this *LocalSubjectAccessReview) String() string { if this == nil { return "nil" @@ -1583,6 +1830,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(this.FieldSelector.String(), "FieldSelectorAttributes", "FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(this.LabelSelector.String(), "LabelSelectorAttributes", "LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -1808,6 +2057,238 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, v1.FieldSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, v1.LabelSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2438,6 +2919,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index 0170ee11f..37b05b855 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -26,7 +26,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/authorization/v1"; // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true @@ -37,6 +37,60 @@ message ExtraValue { repeated string items = 1; } +// FieldSelectorAttributes indicates a field limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. 
getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message FieldSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement requirements = 2; +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message LabelSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement requirements = 2; +} + // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. @@ -44,7 +98,7 @@ message LocalSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -69,11 +123,13 @@ message NonResourceAttributes { // NonResourceRule holds information that describes a rule for the non-resource message NonResourceRule { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic repeated string verbs = 1; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic repeated string nonResourceURLs = 2; } @@ -109,26 +165,44 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, // may contain duplicates, and possibly be incomplete. message ResourceRule { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic repeated string resourceNames = 4; } @@ -139,7 +213,7 @@ message SelfSubjectAccessReview { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -171,7 +245,7 @@ message SelfSubjectRulesReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -192,7 +266,7 @@ message SubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; @@ -220,6 +294,7 @@ message SubjectAccessReviewSpec { // Groups is the groups you're testing for. // +optional + // +listType=atomic repeated string groups = 4; // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer @@ -262,10 +337,12 @@ message SubjectAccessReviewStatus { message SubjectRulesReviewStatus { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic repeated ResourceRule resourceRules = 1; // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic repeated NonResourceRule nonResourceRules = 2; // Incomplete is true when the rules returned by this call are incomplete. This is most commonly diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index d1fe483f9..36f5fa410 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -26,6 +26,7 @@ import ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // SubjectAccessReview checks whether or not a user or group can perform an action. type SubjectAccessReview struct { @@ -47,6 +48,7 @@ type SubjectAccessReview struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". 
Self is a special case, because users should always be able @@ -69,6 +71,7 @@ type SelfSubjectAccessReview struct { // +genclient // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions @@ -115,6 +118,72 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type LabelSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.LabelSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` +} + +// FieldSelectorAttributes indicates a field limited access. 
+// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type FieldSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.FieldSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface @@ -143,6 +212,7 @@ type SubjectAccessReviewSpec struct { User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Groups is the groups you're testing for. // +optional + // +listType=atomic Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer // it needs a reflection here. @@ -197,6 +267,7 @@ type SubjectAccessReviewStatus struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. // The returned list of actions may be incomplete depending on the server's authorization mode, @@ -232,9 +303,11 @@ type SelfSubjectRulesReviewSpec struct { type SubjectRulesReviewStatus struct { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. 
+ // +listType=atomic NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` // Incomplete is true when the rules returned by this call are incomplete. This is most commonly // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. @@ -250,28 +323,34 @@ type SubjectRulesReviewStatus struct { // may contain duplicates, and possibly be incomplete. type ResourceRule struct { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` } // NonResourceRule holds information that describes a rule for the non-resource type NonResourceRule struct { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` } diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 2e5fbea7a..dc6b8a89e 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -24,9 +24,29 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FieldSelectorAttributes = map[string]string{ + "": "FieldSelectorAttributes indicates a field limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. 
For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a field selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (FieldSelectorAttributes) SwaggerDoc() map[string]string { + return map_FieldSelectorAttributes +} + +var map_LabelSelectorAttributes = map[string]string{ + "": "LabelSelectorAttributes indicates a label limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a label selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (LabelSelectorAttributes) SwaggerDoc() map[string]string { + return map_LabelSelectorAttributes +} + var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -59,14 +79,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index f1d49eb38..7f040f5c5 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. 
package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -45,6 +46,52 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorAttributes) DeepCopyInto(out *FieldSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.FieldSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorAttributes. +func (in *FieldSelectorAttributes) DeepCopy() *FieldSelectorAttributes { + if in == nil { + return nil + } + out := new(FieldSelectorAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelectorAttributes) DeepCopyInto(out *LabelSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorAttributes. +func (in *LabelSelectorAttributes) DeepCopy() *LabelSelectorAttributes { + if in == nil { + return nil + } + out := new(LabelSelectorAttributes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in @@ -118,6 +165,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +258,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +356,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b0c0475b4 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LocalSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectRulesReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *SubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 4331d3e5b..5007d1b49 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto +// source: k8s.io/api/authorization/v1beta1/generated.proto package v1beta1 @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/authorization/v1" math "math" math_bits "math/bits" @@ -47,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{0} + return fileDescriptor_8eab727787743457, []int{0} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +76,7 @@ var xxx_messageInfo_ExtraValue proto.InternalMessageInfo func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{1} + return fileDescriptor_8eab727787743457, []int{1} } func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +104,7 @@ var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } func (*NonResourceAttributes) ProtoMessage() {} func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{2} + return fileDescriptor_8eab727787743457, []int{2} } func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +132,7 @@ var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } func (*NonResourceRule) ProtoMessage() {} func (*NonResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{3} + return fileDescriptor_8eab727787743457, []int{3} } func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +160,7 @@ var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (*ResourceAttributes) ProtoMessage() {} func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{4} + return fileDescriptor_8eab727787743457, []int{4} } func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +188,7 @@ var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo func (m *ResourceRule) Reset() { *m = ResourceRule{} } func (*ResourceRule) ProtoMessage() {} func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{5} + return fileDescriptor_8eab727787743457, []int{5} } func (m *ResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +216,7 @@ var xxx_messageInfo_ResourceRule proto.InternalMessageInfo 
func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (*SelfSubjectAccessReview) ProtoMessage() {} func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{6} + return fileDescriptor_8eab727787743457, []int{6} } func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +244,7 @@ var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{7} + return fileDescriptor_8eab727787743457, []int{7} } func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +272,7 @@ var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } func (*SelfSubjectRulesReview) ProtoMessage() {} func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{8} + return fileDescriptor_8eab727787743457, []int{8} } func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +300,7 @@ var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{9} + return fileDescriptor_8eab727787743457, []int{9} } func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +328,7 @@ var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (*SubjectAccessReview) ProtoMessage() {} func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{10} + return fileDescriptor_8eab727787743457, []int{10} } func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +356,7 @@ var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{11} + return fileDescriptor_8eab727787743457, []int{11} } func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +384,7 @@ var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{12} + return fileDescriptor_8eab727787743457, []int{12} } func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +412,7 @@ var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptor_43130d8376f09103, []int{13} + return fileDescriptor_8eab727787743457, []int{13} } func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -455,83 +456,86 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) -} - -var fileDescriptor_43130d8376f09103 = []byte{ - // 1141 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, - 0x05, 0xd1, 0xee, 0x92, 0xa8, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, 0x44, - 0xc9, 0x81, 0x4a, 0xc0, 0xec, 0x7a, 0x62, 0x2f, 0xb6, 0x77, 0x97, 0x99, 0x59, 0x87, 0x20, 0x0e, - 0x3d, 0x72, 0xe4, 0xc8, 0x91, 0x13, 0xdf, 0x81, 0x0b, 0x12, 0x9c, 0x72, 0xec, 0x31, 0x48, 0xc8, - 0x22, 0xcb, 0x87, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, - 0xdb, 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0xde, 0x9b, 0xf7, 0xde, 0x3e, 0xb0, 0xdb, 0x79, 0xc8, 0x0c, - 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, - 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, - 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, - 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xfd, 0x96, 0xc3, 0xdb, 0x81, - 0x65, 0xd8, 0x5e, 0xcf, 0x6c, 0x79, 0x2d, 0xcf, 0x94, 0x8a, 0x56, 0x70, 0x28, 0x4f, 0xf2, 0x20, - 0xbf, 0x22, 0x83, 0x2b, 0x0f, 0x62, 0x17, 0x7a, 0xd8, 0x6e, 0x3b, 0x2e, 0xa1, 0xc7, 0xa6, 0xdf, - 0x69, 0x09, 0x01, 0x33, 0x7b, 0x84, 0x63, 0xb3, 0x7f, 0xc1, 0x8d, 0x15, 0xf3, 0x32, 0x2d, 0x1a, - 0xb8, 0xdc, 0xe9, 0x91, 0x0b, 0x0a, 0x1f, 0x4e, 0x52, 0x60, 0x76, 0x9b, 0xf4, 0xf0, 0x79, 0xbd, - 0xfa, 0x06, 0x00, 0x3b, 0xdf, 0x72, 0x8a, 0x0f, 0x70, 0x37, 0x20, 0xb0, 0x0a, 0x0a, 0x0e, 0x27, - 0x3d, 0xa6, 0x6b, 0xb5, 0xdc, 0x6a, 0xa9, 0x51, 0x0a, 0x07, 0xd5, 0xc2, 0xae, 0x10, 0xa0, 0x48, - 0xbe, 0x59, 0xfc, 0xe9, 0xe7, 0x6a, 0xe6, 0xc5, 0x5f, 0xb5, 0x4c, 0xfd, 0xb7, 0x2c, 0xd0, 0x1f, - 0x7b, 0x36, 0xee, 0xee, 0x05, 0xd6, 0xd7, 0xc4, 0xe6, 0x5b, 0xb6, 0x4d, 0x18, 0x43, 0xa4, 0xef, - 0x90, 0x23, 0xf8, 0x15, 0x28, 0x8a, 0xc8, 0x9a, 0x98, 0x63, 0x5d, 0xab, 0x69, 0xab, 0xe5, 0xf5, - 0xf7, 0x8d, 0x38, 0xb1, 0x23, 0x07, 0x0d, 0xbf, 0xd3, 0x12, 0x02, 0x66, 0x08, 0xb6, 0xd1, 0x5f, - 0x33, 0x3e, 0x93, 0xb6, 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, - 0x65, 0x68, 0x64, 0x15, 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xeb, 0x1f, 0x19, 0x93, - 0x9e, 0xcd, 0x48, 0x71, 0x73, 0xcf, 0x27, 0x76, 0xe3, 0x96, 0xba, 0x26, 0x2f, 0x4e, 0x48, 0x1a, - 0x85, 0x36, 0x98, 0x61, 0x1c, 0xf3, 0x80, 0xe9, 0x39, 0x69, 0xfe, 0xe3, 0x9b, 0x99, 0x97, 0x26, - 0x1a, 0x6f, 0xa8, 0x0b, 0x66, 0xa2, 0x33, 0x52, 0xa6, 0xeb, 0xcf, 0xc1, 0xd2, 0x53, 0xcf, 0x45, - 0x84, 0x79, 0x01, 0xb5, 0xc9, 0x16, 0xe7, 0xd4, 0xb1, 0x02, 0x4e, 0x18, 0xac, 0x81, 0xbc, 0x8f, - 0x79, 0x5b, 0x26, 0xae, 0x14, 0xfb, 0xf7, 0x0c, 0xf3, 0x36, 0x92, 0x88, 0x60, 0xf4, 0x09, 0xb5, - 0x64, 0xf0, 0x09, 0xc6, 0x01, 0xa1, 0x16, 0x92, 0x48, 0xfd, 0x1b, 0x30, 0x9f, 0x30, 0x8e, 0x82, - 0xae, 0x7c, 0x5b, 0x01, 0x8d, 0xbd, 0xad, 0xd0, 0x60, 0x28, 0x92, 0xc3, 0x47, 0x60, 0xde, 0x8d, - 0x75, 0xf6, 0xd1, 0x63, 0xa6, 0x67, 0x25, 0x75, 0x31, 0x1c, 0x54, 0x93, 0xe6, 0x04, 0x84, 0xce, - 0x73, 0x45, 0x41, 0xc0, 0x94, 0x68, 0x4c, 
0x50, 0x72, 0x71, 0x8f, 0x30, 0x1f, 0xdb, 0x44, 0x85, - 0x74, 0x5b, 0x39, 0x5c, 0x7a, 0x3a, 0x04, 0x50, 0xcc, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, - 0xd4, 0x0b, 0x7c, 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0xa2, 0x14, 0x3e, 0x15, 0x42, 0x14, 0x61, 0xf0, - 0x5d, 0x30, 0xdb, 0x27, 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0x6d, 0x5e, 0xd1, 0x66, 0x0f, 0x22, - 0x31, 0x1a, 0xe2, 0xf0, 0x1e, 0x28, 0x52, 0xe5, 0xb8, 0x5e, 0x90, 0xdc, 0x05, 0xc5, 0x2d, 0x8e, - 0x32, 0x38, 0x62, 0xc0, 0x0f, 0x40, 0x99, 0x05, 0xd6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, - 0xf2, 0x5e, 0x0c, 0xa1, 0x24, 0x4f, 0x84, 0x25, 0x62, 0xd4, 0x67, 0xc7, 0xc3, 0x12, 0x29, 0x40, - 0x12, 0xa9, 0xff, 0xa1, 0x81, 0x5b, 0xd3, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x86, 0x3d, - 0x7c, 0xab, 0x39, 0x91, 0xd7, 0xad, 0x67, 0xbb, 0x91, 0x10, 0xc5, 0xb8, 0x20, 0x0f, 0x9d, 0x11, - 0x75, 0x3d, 0x22, 0x0f, 0xaf, 0x64, 0x28, 0xc6, 0xe1, 0x06, 0x98, 0x1b, 0x1e, 0xe4, 0x23, 0xe9, - 0x79, 0xa9, 0x70, 0x3b, 0x1c, 0x54, 0xe7, 0x50, 0x12, 0x40, 0xe3, 0xbc, 0xfa, 0xef, 0x59, 0xb0, - 0xbc, 0x47, 0xba, 0x87, 0xaf, 0x66, 0x2a, 0x7c, 0x39, 0x36, 0x15, 0x1e, 0x5d, 0xa3, 0x6d, 0xd3, - 0x5d, 0x7d, 0xb5, 0x93, 0xe1, 0x97, 0x2c, 0x78, 0xf3, 0x0a, 0xc7, 0xe0, 0xf7, 0x00, 0xd2, 0x0b, - 0x8d, 0xa6, 0x32, 0xfa, 0x60, 0xb2, 0x43, 0x17, 0x9b, 0xb4, 0x71, 0x27, 0x1c, 0x54, 0x53, 0x9a, - 0x17, 0xa5, 0xdc, 0x03, 0x7f, 0xd0, 0xc0, 0x92, 0x9b, 0x36, 0xb8, 0x54, 0xd6, 0x37, 0x26, 0x7b, - 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xa2, 0xf4, 0x0b, 0xc5, 0xc8, 0xb9, - 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xaf, 0xd6, 0xbe, 0x18, 0xab, 0xb5, 0x4f, 0xa6, 0xaa, 0xb5, - 0x84, 0xa7, 0x97, 0x96, 0x9a, 0x75, 0xae, 0xd4, 0x36, 0xaf, 0x5d, 0x6a, 0x49, 0xeb, 0x57, 0x57, - 0xda, 0x13, 0xb0, 0x72, 0xb9, 0x57, 0x53, 0x8f, 0xee, 0xfa, 0xaf, 0x59, 0xb0, 0xf8, 0x7a, 0x1d, - 0xb8, 0x59, 0xd3, 0x9f, 0xe6, 0xc1, 0xf2, 0xeb, 0x86, 0xbf, 0xba, 0xe1, 0xc5, 0x4f, 0x34, 0x60, - 0x84, 0xaa, 0x1f, 0xff, 0xe8, 0xad, 0xf6, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x36, 0xdc, 0x0d, 0xa2, - 0x1f, 0x16, 0x10, 0x99, 0x56, 0xff, 0x42, 0xb5, 0x18, 0x38, 0xa0, 0x40, 0xc4, 0xc6, 0xab, 0x17, - 0x6a, 0xb9, 0xd5, 0xf2, 0xfa, 0xf6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, 0x8e, 0xcb, 0xe9, 0x71, - 0xbc, 0x83, 0x48, 0x19, 0x8a, 0x6e, 0x80, 0x6f, 0x81, 0x5c, 0xe0, 0x34, 0xd5, 0x8a, 0x50, 0x56, - 0x94, 0xdc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, - 0x3a, 0xe4, 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0xfa, 0x62, 0x2d, 0x57, 0x79, 0xbe, - 0x37, 0xd9, 0xd3, 0x78, 0x95, 0x47, 0x91, 0xea, 0x66, 0xf6, 0xa1, 0x56, 0xff, 0x53, 0x03, 0x77, - 0x2f, 0x2d, 0x48, 0xb1, 0x28, 0xe1, 0x6e, 0xd7, 0x3b, 0x22, 0x4d, 0x79, 0x77, 0x31, 0x5e, 0x94, - 0xb6, 0x22, 0x31, 0x1a, 0xe2, 0xf0, 0x1d, 0x30, 0xd3, 0x24, 0xae, 0x43, 0x9a, 0x72, 0xa5, 0x2a, - 0xc6, 0xb5, 0xbc, 0x2d, 0xa5, 0x48, 0xa1, 0x82, 0x47, 0x09, 0x66, 0x9e, 0xab, 0x96, 0xb8, 0x11, - 0x0f, 0x49, 0x29, 0x52, 0x28, 0xdc, 0x02, 0xf3, 0x44, 0xb8, 0x29, 0x83, 0xd8, 0xa1, 0xd4, 0x1b, - 0xbe, 0xec, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0x7e, 0xfd, 0xdf, 0x2c, 0xd0, 0x2f, - 0x1b, 0x7b, 0xb0, 0x13, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xdd, 0xb8, 0x7e, 0xcb, 0x08, - 0xb5, 0xc6, 0x92, 0xf2, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, - 0xbe, 0x72, 0x47, 0x3b, 0x59, 0x79, 0x7d, 0x6d, 0xaa, 0x06, 0x91, 0x57, 0xea, 0xea, 0xca, 0x85, - 0x73, 0x00, 0x43, 0x17, 0x2e, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, - 0x26, 0xb0, 0x18, 0x4f, 0xcb, 0xdd, 0x11, 0x82, 0x12, 0xac, 0xb4, 
0xcc, 0xe7, 0xa7, 0xcb, 0x7c, - 0xe3, 0xfe, 0xc9, 0x59, 0x25, 0xf3, 0xf2, 0xac, 0x92, 0x39, 0x3d, 0xab, 0x64, 0x5e, 0x84, 0x15, - 0xed, 0x24, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x1a, 0x56, 0xb4, 0xbf, 0xc3, 0x8a, 0xf6, 0xe3, - 0x3f, 0x95, 0xcc, 0xe7, 0xb3, 0x2a, 0xc2, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc9, 0xa5, - 0x34, 0xa4, 0x0f, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_8eab727787743457) +} + +var fileDescriptor_8eab727787743457 = []byte{ + // 1192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0xf3, 0xa7, 0x4d, 0x26, 0x1b, 0xda, 0x9d, 0xaa, 0x5b, 0x6f, 0x11, 0x49, 0x14, 0x24, + 0x54, 0xb4, 0x8b, 0xb3, 0xad, 0x0a, 0x5d, 0x0a, 0x7b, 0xa8, 0xd5, 0x2e, 0xaa, 0xd4, 0x5d, 0x56, + 0x53, 0xb5, 0x07, 0x56, 0x02, 0x26, 0xce, 0x34, 0x31, 0x75, 0x6c, 0xe3, 0x19, 0xa7, 0x14, 0x71, + 0xd8, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x70, 0xe2, 0x3b, 0x70, 0x41, 0x82, 0x53, 0x8f, 0x7b, 0x2c, + 0x12, 0x8a, 0xa8, 0xf9, 0x10, 0x5c, 0xd1, 0x8c, 0x27, 0xb1, 0x9d, 0xba, 0x4d, 0xdb, 0x03, 0x7b, + 0xd9, 0x5b, 0xe6, 0xfd, 0x7e, 0xef, 0xcf, 0xbc, 0x79, 0xf3, 0xfc, 0x26, 0xe0, 0xc1, 0xe1, 0x43, + 0xaa, 0x99, 0x4e, 0x13, 0xbb, 0x66, 0x13, 0xfb, 0xac, 0xeb, 0x78, 0xe6, 0xb7, 0x98, 0x99, 0x8e, + 0xdd, 0xec, 0x2f, 0xb7, 0x08, 0xc3, 0xcb, 0xcd, 0x0e, 0xb1, 0x89, 0x87, 0x19, 0x69, 0x6b, 0xae, + 0xe7, 0x30, 0x07, 0xd6, 0x43, 0x0d, 0x0d, 0xbb, 0xa6, 0x96, 0xd0, 0xd0, 0xa4, 0xc6, 0xe2, 0x7b, + 0x1d, 0x93, 0x75, 0xfd, 0x96, 0x66, 0x38, 0xbd, 0x66, 0xc7, 0xe9, 0x38, 0x4d, 0xa1, 0xd8, 0xf2, + 0x0f, 0xc4, 0x4a, 0x2c, 0xc4, 0xaf, 0xd0, 0xe0, 0xe2, 0xbd, 0x4b, 0x42, 0x18, 0xf7, 0xbe, 0xb8, + 0x1a, 0x91, 0x7b, 0xd8, 0xe8, 0x9a, 0x36, 0xf1, 0x8e, 0x9b, 0xee, 0x61, 0x87, 0x0b, 0x68, 0xb3, + 0x47, 0x18, 0x4e, 0xd3, 0x6a, 0x5e, 0xa4, 0xe5, 0xf9, 0x36, 0x33, 0x7b, 0xe4, 0x9c, 0xc2, 0x07, + 0x93, 0x14, 0xa8, 0xd1, 0x25, 0x3d, 0x3c, 0xae, 0xd7, 0x58, 0x03, 0x60, 0xeb, 0x1b, 0xe6, 0xe1, + 0x7d, 0x6c, 0xf9, 0x04, 0xd6, 0x40, 0xc1, 0x64, 0xa4, 0x47, 0x55, 0xa5, 0x9e, 0x5b, 0x2a, 0xe9, + 0xa5, 0x60, 0x50, 0x2b, 0x6c, 0x73, 0x01, 0x0a, 0xe5, 0xeb, 0xc5, 0x1f, 0x7f, 0xae, 0x65, 0x5e, + 0xfc, 0x55, 0xcf, 0x34, 0x7e, 0xcb, 0x02, 0x75, 0xc7, 0x31, 0xb0, 0xb5, 0xeb, 0xb7, 0xbe, 0x22, + 0x06, 0xdb, 0x30, 0x0c, 0x42, 0x29, 0x22, 0x7d, 0x93, 0x1c, 0xc1, 0x2f, 0x41, 0x91, 0xef, 0xac, + 0x8d, 0x19, 0x56, 0x95, 0xba, 0xb2, 0x54, 0x5e, 0x79, 0xa0, 0x45, 0xa7, 0x30, 0x0a, 0x50, 0x73, + 0x0f, 0x3b, 0x5c, 0x40, 0x35, 0xce, 0xd6, 0xfa, 0xcb, 0xda, 0xa7, 0xc2, 0xd6, 0x13, 0xc2, 0xb0, + 0x0e, 0x4f, 0x06, 0xb5, 0x4c, 0x30, 0xa8, 0x81, 0x48, 0x86, 0x46, 0x56, 0xe1, 0x73, 0x90, 0xa7, + 0x2e, 0x31, 0xd4, 0xac, 0xb0, 0xfe, 0xa1, 0x36, 0xe9, 0x8c, 0xb5, 0x94, 0x30, 0x77, 0x5d, 0x62, + 0xe8, 0xb7, 0xa4, 0x9b, 0x3c, 0x5f, 0x21, 0x61, 0x14, 0x1a, 0x60, 0x8a, 0x32, 0xcc, 0x7c, 0xaa, + 0xe6, 0x84, 0xf9, 0x8f, 0x6e, 0x66, 0x5e, 0x98, 0xd0, 0xdf, 0x90, 0x0e, 0xa6, 0xc2, 0x35, 0x92, + 0xa6, 0x1b, 0xcf, 0xc1, 0xfc, 0x53, 0xc7, 0x46, 0x84, 0x3a, 0xbe, 0x67, 0x90, 0x0d, 0xc6, 0x3c, + 0xb3, 0xe5, 0x33, 0x42, 0x61, 0x1d, 0xe4, 0x5d, 0xcc, 0xba, 0x22, 0x71, 0xa5, 0x28, 0xbe, 0x67, + 0x98, 0x75, 0x91, 0x40, 0x38, 0xa3, 0x4f, 0xbc, 0x96, 0xd8, 0x7c, 0x8c, 0xb1, 0x4f, 0xbc, 0x16, + 0x12, 0x48, 0xe3, 0x6b, 0x30, 0x13, 0x33, 0x8e, 0x7c, 0x4b, 0x9c, 0x2d, 0x87, 0x12, 0x67, 0xcb, + 0x35, 0x28, 0x0a, 0xe5, 0xf0, 0x11, 0x98, 0xb1, 0x23, 0x9d, 0x3d, 0xb4, 0x43, 0xd5, 0xac, 0xa0, + 0xce, 0x05, 0x83, 0x5a, 0xdc, 0x1c, 0x87, 0xd0, 
0x38, 0xb7, 0xf1, 0x53, 0x1e, 0xc0, 0x94, 0xdd, + 0x34, 0x41, 0xc9, 0xc6, 0x3d, 0x42, 0x5d, 0x6c, 0x10, 0xb9, 0xa5, 0xdb, 0x32, 0xe0, 0xd2, 0xd3, + 0x21, 0x80, 0x22, 0xce, 0xe4, 0xcd, 0xc1, 0xb7, 0x41, 0xa1, 0xe3, 0x39, 0xbe, 0x2b, 0x4e, 0xa7, + 0xa4, 0x57, 0x24, 0xa5, 0xf0, 0x09, 0x17, 0xa2, 0x10, 0x83, 0xef, 0x82, 0xe9, 0x3e, 0xf1, 0xa8, + 0xe9, 0xd8, 0x6a, 0x5e, 0xd0, 0x66, 0x24, 0x6d, 0x7a, 0x3f, 0x14, 0xa3, 0x21, 0x0e, 0xef, 0x83, + 0xa2, 0x27, 0x03, 0x57, 0x0b, 0x82, 0x3b, 0x2b, 0xb9, 0xc5, 0x51, 0x06, 0x47, 0x0c, 0xf8, 0x3e, + 0x28, 0x53, 0xbf, 0x35, 0x52, 0x98, 0x12, 0x0a, 0x73, 0x52, 0xa1, 0xbc, 0x1b, 0x41, 0x28, 0xce, + 0xe3, 0xdb, 0xe2, 0x7b, 0x54, 0xa7, 0x93, 0xdb, 0xe2, 0x29, 0x40, 0x02, 0x81, 0x3d, 0x50, 0x39, + 0x30, 0x89, 0xd5, 0xde, 0x25, 0x16, 0x31, 0x98, 0xe3, 0xa9, 0x45, 0x51, 0x7c, 0xab, 0x97, 0x15, + 0x9f, 0xf6, 0x38, 0xae, 0x11, 0xa5, 0x5d, 0xbf, 0x1d, 0x0c, 0x6a, 0x95, 0x04, 0x88, 0x92, 0xd6, + 0xb9, 0x3b, 0x0b, 0xb7, 0x88, 0x35, 0x72, 0x57, 0xba, 0x82, 0xbb, 0x9d, 0xb8, 0xc6, 0xb8, 0xbb, + 0x04, 0x88, 0x92, 0xd6, 0x1b, 0x7f, 0x28, 0xe0, 0xd6, 0xf5, 0xea, 0xf1, 0x1e, 0x28, 0x61, 0xd7, + 0x14, 0x87, 0x3a, 0xac, 0xc4, 0x0a, 0xaf, 0x9a, 0x8d, 0x67, 0xdb, 0xa1, 0x10, 0x45, 0x38, 0x27, + 0x0f, 0x53, 0xcd, 0x6f, 0xed, 0x88, 0x3c, 0x74, 0x49, 0x51, 0x84, 0xc3, 0x35, 0x50, 0x19, 0x2e, + 0x44, 0x09, 0xaa, 0x79, 0xa1, 0x20, 0x36, 0x81, 0xe2, 0x00, 0x4a, 0xf2, 0x1a, 0xbf, 0x67, 0xc1, + 0xc2, 0x2e, 0xb1, 0x0e, 0x5e, 0x4d, 0xcf, 0xfb, 0x22, 0xd1, 0xf3, 0x1e, 0x5d, 0xa1, 0x29, 0xa5, + 0x87, 0xfa, 0x6a, 0xfb, 0xde, 0x2f, 0x59, 0xf0, 0xe6, 0x25, 0x81, 0xc1, 0xef, 0x00, 0xf4, 0xce, + 0xb5, 0x11, 0x99, 0xd1, 0xd5, 0xc9, 0x01, 0x9d, 0x6f, 0x41, 0xfa, 0x9d, 0x60, 0x50, 0x4b, 0x69, + 0x4d, 0x28, 0xc5, 0x0f, 0xfc, 0x5e, 0x01, 0xf3, 0x76, 0x5a, 0x5b, 0x96, 0x59, 0x5f, 0x9b, 0x1c, + 0x41, 0x6a, 0x57, 0xd7, 0xef, 0x06, 0x83, 0x5a, 0x7a, 0xc3, 0x47, 0xe9, 0x0e, 0xf9, 0x17, 0xf6, + 0x4e, 0x2c, 0x51, 0xfc, 0xd2, 0xfc, 0x7f, 0xb5, 0xf6, 0x79, 0xa2, 0xd6, 0x3e, 0xbe, 0x56, 0xad, + 0xc5, 0x22, 0xbd, 0xb0, 0xd4, 0x5a, 0x63, 0xa5, 0xb6, 0x7e, 0xe5, 0x52, 0x8b, 0x5b, 0xbf, 0xbc, + 0xd2, 0x9e, 0x80, 0xc5, 0x8b, 0xa3, 0xba, 0xf6, 0x87, 0xa9, 0xf1, 0x6b, 0x16, 0xcc, 0xbd, 0x1e, + 0x76, 0x6e, 0x76, 0xe9, 0x4f, 0xf3, 0x60, 0xe1, 0xf5, 0x85, 0xbf, 0xfc, 0xc2, 0xf3, 0x11, 0xc1, + 0xa7, 0xc4, 0x93, 0x63, 0xcd, 0xe8, 0xac, 0xf6, 0x28, 0xf1, 0x90, 0x40, 0x60, 0x7d, 0x38, 0xf9, + 0x84, 0x1f, 0x2c, 0xc0, 0x33, 0x2d, 0xbf, 0x85, 0x72, 0xec, 0x31, 0x41, 0x81, 0xf0, 0x79, 0x5e, + 0x2d, 0xd4, 0x73, 0x4b, 0xe5, 0x95, 0xcd, 0x1b, 0xd7, 0x8a, 0x26, 0x9e, 0x05, 0x5b, 0x36, 0xf3, + 0x8e, 0xa3, 0x09, 0x4b, 0xc8, 0x50, 0xe8, 0x01, 0xbe, 0x05, 0x72, 0xbe, 0xd9, 0x96, 0x03, 0x50, + 0x59, 0x52, 0x72, 0x7b, 0xdb, 0x9b, 0x88, 0xcb, 0x17, 0x0f, 0xe4, 0xcb, 0x42, 0x98, 0x80, 0xb3, + 0x20, 0x77, 0x48, 0x8e, 0xc3, 0x7b, 0x86, 0xf8, 0x4f, 0xa8, 0x83, 0x42, 0x9f, 0x3f, 0x3a, 0x64, + 0x9e, 0xef, 0x4f, 0x8e, 0x34, 0x7a, 0xa8, 0xa0, 0x50, 0x75, 0x3d, 0xfb, 0x50, 0x69, 0xfc, 0xa9, + 0x80, 0xbb, 0x17, 0x16, 0x24, 0x1f, 0x03, 0xb1, 0x65, 0x39, 0x47, 0xa4, 0x2d, 0x7c, 0x17, 0xa3, + 0x31, 0x70, 0x23, 0x14, 0xa3, 0x21, 0x0e, 0xdf, 0x01, 0x53, 0x6d, 0x62, 0x9b, 0xa4, 0x2d, 0x06, + 0xc6, 0x62, 0x54, 0xcb, 0x9b, 0x42, 0x8a, 0x24, 0xca, 0x79, 0x1e, 0xc1, 0xd4, 0xb1, 0xe5, 0x88, + 0x3a, 0xe2, 0x21, 0x21, 0x45, 0x12, 0x85, 0x1b, 0x60, 0x86, 0xf0, 0x30, 0xc5, 0x26, 0xb6, 0x3c, + 0xcf, 0x19, 0x9e, 0xec, 0x82, 0x54, 0x98, 0xd9, 0x4a, 0xc2, 0x68, 0x9c, 0xdf, 0xf8, 0x37, 0x0b, + 0xd4, 0x8b, 0xda, 0x1e, 0x3c, 0x8c, 0xa6, 0x18, 0x01, 0x8a, 0x41, 0xaa, 
0xbc, 0xa2, 0x5d, 0xfd, + 0xca, 0x70, 0x35, 0x7d, 0x5e, 0x46, 0x53, 0x89, 0x4b, 0x63, 0x93, 0x8f, 0x58, 0xc2, 0x23, 0x30, + 0x6b, 0x27, 0x1f, 0x14, 0xe1, 0x4c, 0x56, 0x5e, 0x59, 0xbe, 0xd6, 0x05, 0x11, 0x2e, 0x55, 0xe9, + 0x72, 0x76, 0x0c, 0xa0, 0xe8, 0x9c, 0x13, 0xb8, 0x02, 0x80, 0x69, 0x1b, 0x4e, 0xcf, 0xb5, 0x08, + 0x23, 0x22, 0x81, 0xc5, 0xa8, 0x5b, 0x6e, 0x8f, 0x10, 0x14, 0x63, 0xa5, 0x65, 0x3e, 0x7f, 0xbd, + 0xcc, 0xeb, 0x8f, 0x4f, 0xce, 0xaa, 0x99, 0x97, 0x67, 0xd5, 0xcc, 0xe9, 0x59, 0x35, 0xf3, 0x22, + 0xa8, 0x2a, 0x27, 0x41, 0x55, 0x79, 0x19, 0x54, 0x95, 0xd3, 0xa0, 0xaa, 0xfc, 0x1d, 0x54, 0x95, + 0x1f, 0xfe, 0xa9, 0x66, 0x3e, 0xab, 0x4f, 0xfa, 0x33, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x46, 0xf7, 0xe0, 0x3d, 0xaf, 0x10, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -713,6 +717,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1347,6 +1375,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1583,6 +1619,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(fmt.Sprintf("%v", this.FieldSelector), "FieldSelectorAttributes", "v11.FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelectorAttributes", "v11.LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -2438,6 +2476,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &v11.FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v11.LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 4b1a55e0c..8738768b8 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -21,12 +21,13 @@ syntax = "proto2"; package k8s.io.api.authorization.v1beta1; +import "k8s.io/api/authorization/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/authorization/v1beta1"; // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true @@ -44,7 +45,7 @@ message LocalSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -69,11 +70,13 @@ message NonResourceAttributes { // NonResourceRule holds information that describes a rule for the non-resource message NonResourceRule { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic repeated string verbs = 1; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic repeated string nonResourceURLs = 2; } @@ -109,26 +112,38 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + optional .k8s.io.api.authorization.v1.FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // +optional + optional .k8s.io.api.authorization.v1.LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, // may contain duplicates, and possibly be incomplete. message ResourceRule { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. 
+ // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic repeated string resourceNames = 4; } @@ -139,7 +154,7 @@ message SelfSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -171,7 +186,7 @@ message SelfSubjectRulesReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -192,7 +207,7 @@ message SubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; @@ -220,6 +235,7 @@ message SubjectAccessReviewSpec { // Groups is the groups you're testing for. // +optional + // +listType=atomic repeated string group = 4; // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer @@ -262,10 +278,12 @@ message SubjectAccessReviewStatus { message SubjectRulesReviewStatus { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic repeated ResourceRule resourceRules = 1; // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic repeated NonResourceRule nonResourceRules = 2; // Incomplete is true when the rules returned by this call are incomplete. 
This is most commonly diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index 265309865..8b8e5a986 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -124,6 +125,12 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + FieldSelector *authorizationv1.FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // +optional + LabelSelector *authorizationv1.LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface @@ -152,6 +159,7 @@ type SubjectAccessReviewSpec struct { User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Groups is the groups you're testing for. // +optional + // +listType=atomic Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"` // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer // it needs a reflection here. @@ -244,9 +252,11 @@ type SelfSubjectRulesReviewSpec struct { type SubjectRulesReviewStatus struct { // ResourceRules is the list of actions the subject is allowed to perform on resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + // +listType=atomic NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` // Incomplete is true when the rules returned by this call are incomplete. This is most commonly // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. @@ -262,28 +272,34 @@ type SubjectRulesReviewStatus struct { // may contain duplicates, and possibly be incomplete. type ResourceRule struct { // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. 
// +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` } // NonResourceRule holds information that describes a rule for the non-resource type NonResourceRule struct { // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, // final step in the path. "*" means all. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` } diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 2d291189e..bb1352a2d 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_LocalSubjectAccessReview = map[string]string{ @@ -59,14 +59,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index 13f09cf2d..d76993dba 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/authorization/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -118,6 +119,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(v1.FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +212,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +310,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 8c9c09b5c..d64c9cbc1 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. 
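In the zz_generated.deepcopy.go hunks above, the copies of ResourceAttributes inside SelfSubjectAccessReviewSpec and SubjectAccessReviewSpec change from the shallow `**out = **in` to `(*in).DeepCopyInto(*out)` because ResourceAttributes now carries pointer fields. A self-contained sketch using hypothetical stand-in types (not the vendored ones) shows the aliasing the shallow form would reintroduce:

```go
package main

import "fmt"

// Stand-ins for the vendored types, reduced to a single pointer field.
type selector struct{ Raw string }

type attributes struct {
	Verb     string
	Selector *selector // analogous to the new FieldSelector/LabelSelector pointers
}

// shallowCopy mimics the old generated pattern `**out = **in`:
// the pointer value itself is copied, so both structs share one selector.
func shallowCopy(in *attributes) *attributes {
	out := new(attributes)
	*out = *in
	return out
}

// deepCopy mimics the new pattern `(*in).DeepCopyInto(*out)`:
// pointer fields are re-allocated so the copies are independent.
func deepCopy(in *attributes) *attributes {
	out := new(attributes)
	*out = *in
	if in.Selector != nil {
		out.Selector = &selector{Raw: in.Selector.Raw}
	}
	return out
}

func main() {
	orig := &attributes{Verb: "list", Selector: &selector{Raw: "app=web"}}
	s := shallowCopy(orig)
	d := deepCopy(orig)

	orig.Selector.Raw = "app=db" // mutate the original after copying

	fmt.Println(s.Selector.Raw) // "app=db": aliased through the shared pointer
	fmt.Println(d.Selector.Raw) // "app=web": independent deep copy
}
```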
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go index a6ff299d7..3e3c23135 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto +// source: k8s.io/api/autoscaling/v1/generated.proto package v1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{0} + return fileDescriptor_1972394c0c7aac8b, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{1} + return fileDescriptor_1972394c0c7aac8b, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{2} + return fileDescriptor_1972394c0c7aac8b, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{3} + return fileDescriptor_1972394c0c7aac8b, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{4} + return fileDescriptor_1972394c0c7aac8b, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{5} + return fileDescriptor_1972394c0c7aac8b, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{6} + return fileDescriptor_1972394c0c7aac8b, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{7} + return fileDescriptor_1972394c0c7aac8b, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{8} + return fileDescriptor_1972394c0c7aac8b, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{9} + return fileDescriptor_1972394c0c7aac8b, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{10} + return fileDescriptor_1972394c0c7aac8b, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{11} + return fileDescriptor_1972394c0c7aac8b, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{12} + return fileDescriptor_1972394c0c7aac8b, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_2bb1f2101a7f10e2, []int{13} + return fileDescriptor_1972394c0c7aac8b, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{14} + return fileDescriptor_1972394c0c7aac8b, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{15} + return fileDescriptor_1972394c0c7aac8b, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{16} + return fileDescriptor_1972394c0c7aac8b, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{17} + return fileDescriptor_1972394c0c7aac8b, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +554,7 @@ var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{18} + return fileDescriptor_1972394c0c7aac8b, []int{18} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +582,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{19} + return fileDescriptor_1972394c0c7aac8b, []int{19} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +610,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{20} + return fileDescriptor_1972394c0c7aac8b, []int{20} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,112 +660,111 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) -} - -var fileDescriptor_2bb1f2101a7f10e2 = []byte{ - // 1605 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x70, 0xd3, 0xd6, - 0x16, 0x8e, 0x7f, 0x12, 0x92, 0xe3, 0x90, 
0x9f, 0x0b, 0x0f, 0x4c, 0x78, 0x58, 0x19, 0x3d, 0x86, - 0xc9, 0x7b, 0xaf, 0x48, 0x8d, 0x4b, 0x19, 0xba, 0x8c, 0xdc, 0x52, 0x98, 0xc6, 0x10, 0x6e, 0x02, - 0xa5, 0xbf, 0xc3, 0x8d, 0x7c, 0x71, 0x44, 0x2c, 0xc9, 0x23, 0xc9, 0x1e, 0xc2, 0x0c, 0x33, 0xed, - 0xa2, 0xfb, 0x6e, 0x68, 0xb7, 0xed, 0x4c, 0xb7, 0x5d, 0xb3, 0xee, 0x8e, 0x25, 0x0b, 0x66, 0xca, - 0xca, 0x53, 0xd4, 0x45, 0x17, 0x5d, 0x75, 0xcb, 0xaa, 0xa3, 0xab, 0x2b, 0x59, 0xb2, 0x2d, 0xc5, - 0x71, 0x42, 0xa6, 0xed, 0xb0, 0xb3, 0x7c, 0xcf, 0xf9, 0xce, 0xbd, 0xe7, 0xff, 0x1c, 0x50, 0xb6, - 0x2f, 0xd9, 0x92, 0x66, 0xca, 0xdb, 0xad, 0x4d, 0x6a, 0x19, 0xd4, 0xa1, 0xb6, 0xdc, 0xa6, 0x46, - 0xcd, 0xb4, 0x64, 0x7e, 0x40, 0x9a, 0x9a, 0x4c, 0x5a, 0x8e, 0x69, 0xab, 0xa4, 0xa1, 0x19, 0x75, - 0xb9, 0xbd, 0x2c, 0xd7, 0xa9, 0x41, 0x2d, 0xe2, 0xd0, 0x9a, 0xd4, 0xb4, 0x4c, 0xc7, 0x44, 0xa7, - 0x7c, 0x52, 0x89, 0x34, 0x35, 0x29, 0x42, 0x2a, 0xb5, 0x97, 0x17, 0xce, 0xd7, 0x35, 0x67, 0xab, - 0xb5, 0x29, 0xa9, 0xa6, 0x2e, 0xd7, 0xcd, 0xba, 0x29, 0x33, 0x8e, 0xcd, 0xd6, 0x5d, 0xf6, 0xc5, - 0x3e, 0xd8, 0x2f, 0x1f, 0x69, 0x41, 0x8c, 0x08, 0x55, 0x4d, 0x8b, 0x0e, 0x90, 0xb6, 0x70, 0xa1, - 0x4b, 0xa3, 0x13, 0x75, 0x4b, 0x33, 0xa8, 0xb5, 0x23, 0x37, 0xb7, 0xeb, 0x8c, 0xc9, 0xa2, 0xb6, - 0xd9, 0xb2, 0x54, 0xba, 0x27, 0x2e, 0x5b, 0xd6, 0xa9, 0x43, 0x06, 0xc9, 0x92, 0x93, 0xb8, 0xac, - 0x96, 0xe1, 0x68, 0x7a, 0xbf, 0x98, 0x8b, 0xbb, 0x31, 0xd8, 0xea, 0x16, 0xd5, 0x49, 0x2f, 0x9f, - 0xf8, 0x5b, 0x16, 0xce, 0x54, 0x4c, 0xc3, 0x21, 0x1e, 0x07, 0xe6, 0x8f, 0xa8, 0x52, 0xc7, 0xd2, - 0xd4, 0x75, 0xf6, 0x1b, 0x55, 0x20, 0x6f, 0x10, 0x9d, 0x16, 0x33, 0x8b, 0x99, 0xa5, 0x29, 0x45, - 0x7e, 0xd2, 0x11, 0xc6, 0xdc, 0x8e, 0x90, 0xbf, 0x46, 0x74, 0xfa, 0xb2, 0x23, 0x08, 0xfd, 0x8a, - 0x93, 0x02, 0x18, 0x8f, 0x04, 0x33, 0x66, 0x74, 0x1b, 0x8a, 0x0e, 0xb1, 0xea, 0xd4, 0x59, 0x69, - 0x53, 0x8b, 0xd4, 0xe9, 0x4d, 0x47, 0x6b, 0x68, 0x0f, 0x88, 0xa3, 0x99, 0x46, 0x31, 0xbb, 0x98, - 0x59, 0x1a, 0x57, 0xfe, 0xed, 0x76, 0x84, 0xe2, 0x46, 0x02, 0x0d, 0x4e, 0xe4, 0x46, 0x6d, 0x40, - 0xb1, 0xb3, 0x5b, 0xa4, 0xd1, 0xa2, 0xc5, 0xdc, 0x62, 0x66, 0xa9, 0x50, 0x96, 0xa4, 0xae, 0x83, - 0x84, 0x5a, 0x91, 0x9a, 0xdb, 0x75, 0xe6, 0x31, 0x81, 0xc9, 0xa4, 0x1b, 0x2d, 0x62, 0x38, 0x9a, - 0xb3, 0xa3, 0x9c, 0x70, 0x3b, 0x02, 0xda, 0xe8, 0x43, 0xc3, 0x03, 0x24, 0x20, 0x19, 0xa6, 0xd4, - 0x40, 0x6f, 0xc5, 0x71, 0xa6, 0x9b, 0x79, 0xae, 0x9b, 0xa9, 0xae, 0x42, 0xbb, 0x34, 0xe2, 0x1f, - 0x29, 0x9a, 0x76, 0x88, 0xd3, 0xb2, 0x0f, 0x46, 0xd3, 0x9f, 0xc0, 0x29, 0xb5, 0x65, 0x59, 0xd4, - 0x48, 0x56, 0xf5, 0x19, 0xb7, 0x23, 0x9c, 0xaa, 0x24, 0x11, 0xe1, 0x64, 0x7e, 0xf4, 0x10, 0x8e, - 0xc5, 0x0f, 0xf7, 0xa3, 0xed, 0xd3, 0xfc, 0x81, 0xc7, 0x2a, 0xfd, 0x90, 0x78, 0x90, 0x9c, 0xb8, - 0xce, 0xf3, 0x43, 0xe8, 0xfc, 0x51, 0x06, 0x4e, 0x57, 0x2c, 0xd3, 0xb6, 0x6f, 0x51, 0xcb, 0xd6, - 0x4c, 0xe3, 0xfa, 0xe6, 0x3d, 0xaa, 0x3a, 0x98, 0xde, 0xa5, 0x16, 0x35, 0x54, 0x8a, 0x16, 0x21, - 0xbf, 0xad, 0x19, 0x35, 0xae, 0xf1, 0xe9, 0x40, 0xe3, 0x1f, 0x68, 0x46, 0x0d, 0xb3, 0x13, 0x8f, - 0x82, 0xd9, 0x24, 0x1b, 0xa7, 0x88, 0x28, 0xbc, 0x0c, 0x40, 0x9a, 0x1a, 0x17, 0xc0, 0x54, 0x31, - 0xa5, 0x20, 0x4e, 0x07, 0x2b, 0x6b, 0x57, 0xf9, 0x09, 0x8e, 0x50, 0x89, 0xdf, 0xe4, 0xe0, 0xf8, - 0x7b, 0xf7, 0x1d, 0x6a, 0x19, 0xa4, 0x11, 0x0b, 0xb6, 0x32, 0x80, 0xce, 0xbe, 0xaf, 0x75, 0x1d, - 0x21, 0x04, 0xab, 0x86, 0x27, 0x38, 0x42, 0x85, 0x4c, 0x98, 0xf1, 0xbf, 0xd6, 0x69, 0x83, 0xaa, - 0x8e, 0x69, 0xb1, 0xcb, 0x16, 0xca, 0x6f, 0xa5, 0xd9, 0xc3, 0x96, 0xbc, 0xd4, 0x23, 0xb5, 0x97, - 0xa5, 0x55, 0xb2, 0x49, 0x1b, 0x01, 0xab, 0x82, 0xdc, 0x8e, 0x30, 
0x53, 0x8d, 0xc1, 0xe1, 0x1e, - 0x78, 0x44, 0xa0, 0xe0, 0x07, 0xc4, 0x7e, 0xac, 0x3f, 0xeb, 0x76, 0x84, 0xc2, 0x46, 0x17, 0x06, - 0x47, 0x31, 0x13, 0xa2, 0x3a, 0xff, 0xaa, 0xa3, 0x5a, 0xfc, 0xae, 0xdf, 0x30, 0x7e, 0x6c, 0xfe, - 0x2d, 0x0c, 0xb3, 0x05, 0xd3, 0x3c, 0x6c, 0xf6, 0x63, 0x99, 0xe3, 0xfc, 0x59, 0xd3, 0x95, 0x08, - 0x16, 0x8e, 0x21, 0xa3, 0x9d, 0xc1, 0x89, 0x60, 0x34, 0x03, 0x9d, 0xdc, 0x4b, 0x12, 0x10, 0x1f, - 0x67, 0xe1, 0xe4, 0x15, 0xd3, 0xd2, 0x1e, 0x78, 0x51, 0xde, 0x58, 0x33, 0x6b, 0x2b, 0xbc, 0xf2, - 0x53, 0x0b, 0xdd, 0x81, 0x49, 0x4f, 0x7b, 0x35, 0xe2, 0x10, 0x66, 0xa3, 0x42, 0xf9, 0xcd, 0xe1, - 0x74, 0xed, 0x27, 0x86, 0x2a, 0x75, 0x48, 0xd7, 0xaa, 0xdd, 0xff, 0x70, 0x88, 0x8a, 0x6e, 0x43, - 0xde, 0x6e, 0x52, 0x95, 0x5b, 0xf2, 0xa2, 0x94, 0xd8, 0x81, 0x48, 0x09, 0x77, 0x5c, 0x6f, 0x52, - 0xb5, 0x9b, 0x47, 0xbc, 0x2f, 0xcc, 0x10, 0xd1, 0x1d, 0x98, 0xb0, 0x99, 0xaf, 0x71, 0xb3, 0x5d, - 0x1a, 0x01, 0x9b, 0xf1, 0x2b, 0x33, 0x1c, 0x7d, 0xc2, 0xff, 0xc6, 0x1c, 0x57, 0xfc, 0x2a, 0x07, - 0x8b, 0x09, 0x9c, 0x15, 0xd3, 0xa8, 0x69, 0x2c, 0xc5, 0x5f, 0x81, 0xbc, 0xb3, 0xd3, 0x0c, 0x5c, - 0xfc, 0x42, 0x70, 0xd1, 0x8d, 0x9d, 0xa6, 0x57, 0x84, 0xce, 0xee, 0xc6, 0xef, 0xd1, 0x61, 0x86, - 0x80, 0x56, 0xc3, 0x07, 0x65, 0x63, 0x58, 0xfc, 0x5a, 0x2f, 0x3b, 0xc2, 0x80, 0xae, 0x4b, 0x0a, - 0x91, 0xe2, 0x97, 0xf7, 0x32, 0x42, 0x83, 0xd8, 0xce, 0x86, 0x45, 0x0c, 0xdb, 0x97, 0xa4, 0xe9, - 0x81, 0x87, 0xff, 0x6f, 0x38, 0x23, 0x7b, 0x1c, 0xca, 0x02, 0xbf, 0x05, 0x5a, 0xed, 0x43, 0xc3, - 0x03, 0x24, 0xa0, 0x73, 0x30, 0x61, 0x51, 0x62, 0x9b, 0x06, 0x2f, 0x38, 0xa1, 0x72, 0x31, 0xfb, - 0x17, 0xf3, 0x53, 0xf4, 0x5f, 0x38, 0xa2, 0x53, 0xdb, 0x26, 0x75, 0xca, 0xbb, 0x81, 0x59, 0x4e, - 0x78, 0xa4, 0xea, 0xff, 0x8d, 0x83, 0x73, 0xf1, 0x59, 0x06, 0x4e, 0x27, 0xe8, 0x71, 0x55, 0xb3, - 0x1d, 0xf4, 0x69, 0x9f, 0x17, 0x4b, 0x43, 0x66, 0x0c, 0xcd, 0xf6, 0x7d, 0x78, 0x8e, 0xcb, 0x9e, - 0x0c, 0xfe, 0x89, 0x78, 0xf0, 0x87, 0x30, 0xae, 0x39, 0x54, 0xf7, 0xac, 0x92, 0x5b, 0x2a, 0x94, - 0xcb, 0x7b, 0x77, 0x33, 0xe5, 0x28, 0x87, 0x1f, 0xbf, 0xea, 0x01, 0x61, 0x1f, 0x4f, 0xfc, 0x3d, - 0x9b, 0xf8, 0x2c, 0xcf, 0xcd, 0x51, 0x1b, 0x66, 0xd8, 0x97, 0x9f, 0x8a, 0x31, 0xbd, 0xcb, 0x1f, - 0x97, 0x16, 0x44, 0x29, 0xc5, 0x5b, 0x39, 0xc1, 0x6f, 0x31, 0xb3, 0x1e, 0x43, 0xc5, 0x3d, 0x52, - 0xd0, 0x32, 0x14, 0x74, 0xcd, 0xc0, 0xb4, 0xd9, 0xd0, 0x54, 0x62, 0xf3, 0x1e, 0x88, 0x95, 0x9f, - 0x6a, 0xf7, 0x6f, 0x1c, 0xa5, 0x41, 0x6f, 0x43, 0x41, 0x27, 0xf7, 0x43, 0x96, 0x1c, 0x63, 0x39, - 0xc6, 0xe5, 0x15, 0xaa, 0xdd, 0x23, 0x1c, 0xa5, 0x43, 0xf7, 0xa0, 0xe4, 0xd7, 0x94, 0xca, 0xda, - 0xcd, 0x48, 0xdb, 0xb4, 0x46, 0x2d, 0x95, 0x1a, 0x8e, 0xe7, 0x1a, 0x79, 0x86, 0x24, 0xba, 0x1d, - 0xa1, 0xb4, 0x91, 0x4a, 0x89, 0x77, 0x41, 0x12, 0x7f, 0xca, 0xc1, 0x99, 0xd4, 0x34, 0x80, 0x2e, - 0x03, 0x32, 0x37, 0x6d, 0x6a, 0xb5, 0x69, 0xed, 0x7d, 0xbf, 0xeb, 0xf7, 0x1a, 0x14, 0x4f, 0xe7, - 0x39, 0xbf, 0x26, 0x5e, 0xef, 0x3b, 0xc5, 0x03, 0x38, 0x90, 0x0a, 0x47, 0xbd, 0xb8, 0xf0, 0xb5, - 0xac, 0xf1, 0x5e, 0x68, 0x6f, 0x41, 0x37, 0xef, 0x76, 0x84, 0xa3, 0xab, 0x51, 0x10, 0x1c, 0xc7, - 0x44, 0x2b, 0x30, 0xcb, 0x93, 0x7d, 0x8f, 0xd6, 0x4f, 0x72, 0xad, 0xcf, 0x56, 0xe2, 0xc7, 0xb8, - 0x97, 0xde, 0x83, 0xa8, 0x51, 0x5b, 0xb3, 0x68, 0x2d, 0x84, 0xc8, 0xc7, 0x21, 0xde, 0x8d, 0x1f, - 0xe3, 0x5e, 0x7a, 0xa4, 0x83, 0xc0, 0x51, 0x13, 0x2d, 0x38, 0xce, 0x20, 0xff, 0xe3, 0x76, 0x04, - 0xa1, 0x92, 0x4e, 0x8a, 0x77, 0xc3, 0x12, 0x1f, 0xe5, 0x81, 0xf7, 0x0e, 0x2c, 0x40, 0x2e, 0xc4, - 0x52, 0xef, 0x62, 0x4f, 0xea, 0x9d, 0x8b, 0x36, 0x8a, 0x91, 0x34, 0x7b, 0x03, 0x26, 0x4c, 
0x16, - 0x19, 0xdc, 0x2e, 0xe7, 0x53, 0xc2, 0x29, 0x2c, 0x69, 0x21, 0x90, 0x02, 0x5e, 0x2e, 0xe3, 0xa1, - 0xc5, 0x81, 0xd0, 0x55, 0xc8, 0x37, 0xcd, 0x5a, 0x50, 0x88, 0xfe, 0x9f, 0x02, 0xb8, 0x66, 0xd6, - 0xec, 0x18, 0xdc, 0xa4, 0x77, 0x63, 0xef, 0x5f, 0xcc, 0x20, 0xd0, 0x47, 0x30, 0x19, 0x14, 0x7c, - 0xde, 0x1d, 0xc8, 0x29, 0x70, 0x83, 0x06, 0x50, 0x65, 0xda, 0x4b, 0x64, 0xc1, 0x09, 0x0e, 0xe1, - 0xd0, 0x43, 0x98, 0x57, 0x7b, 0xe7, 0xa9, 0xe2, 0x91, 0x5d, 0x6b, 0x67, 0xea, 0xb4, 0xab, 0xfc, - 0xcb, 0xed, 0x08, 0xf3, 0x7d, 0x24, 0xb8, 0x5f, 0x92, 0xf7, 0x32, 0xca, 0x3b, 0x45, 0xe6, 0x14, - 0xe9, 0x2f, 0x1b, 0xd4, 0xed, 0xfb, 0x2f, 0x0b, 0x4e, 0x70, 0x08, 0x27, 0x7e, 0x9b, 0x87, 0xe9, - 0x58, 0xf7, 0x79, 0xc8, 0x9e, 0xe1, 0xb7, 0x11, 0x07, 0xe6, 0x19, 0x3e, 0xdc, 0x81, 0x7a, 0x86, - 0x0f, 0x79, 0x48, 0x9e, 0xe1, 0x0b, 0x3b, 0x24, 0xcf, 0x88, 0xbc, 0x6c, 0x80, 0x67, 0x3c, 0xcb, - 0x01, 0xea, 0x0f, 0x62, 0xf4, 0x39, 0x4c, 0xf8, 0xe5, 0x62, 0x9f, 0x25, 0x35, 0x6c, 0x6e, 0x78, - 0xf5, 0xe4, 0xa8, 0x3d, 0xd3, 0x4f, 0x76, 0xa8, 0xe9, 0x87, 0x1e, 0xc4, 0x94, 0x18, 0xd6, 0xdc, - 0xc4, 0x49, 0xf1, 0x33, 0x98, 0xb4, 0x83, 0xf1, 0x2a, 0x3f, 0xfa, 0x78, 0xc5, 0x14, 0x1e, 0x0e, - 0x56, 0x21, 0x24, 0xaa, 0xc1, 0x34, 0x89, 0x4e, 0x38, 0xe3, 0x23, 0x3d, 0x63, 0xce, 0x1b, 0xa7, - 0x62, 0xa3, 0x4d, 0x0c, 0x55, 0xfc, 0xb9, 0xd7, 0xac, 0x7e, 0xd8, 0xff, 0x15, 0xcd, 0x7a, 0x78, - 0x33, 0xe6, 0x3f, 0xc2, 0xb2, 0xdf, 0x67, 0x61, 0xae, 0xb7, 0x48, 0x8e, 0xb4, 0x4c, 0x78, 0x30, - 0x70, 0x23, 0x92, 0x1d, 0xe9, 0xd2, 0xe1, 0x0c, 0x34, 0xe4, 0xae, 0x33, 0x6a, 0x89, 0xdc, 0x81, - 0x5b, 0x42, 0xfc, 0x21, 0xae, 0xa3, 0xd1, 0x17, 0x2e, 0x09, 0xeb, 0xc9, 0xec, 0x21, 0xad, 0x27, - 0x5f, 0xb1, 0x9a, 0x7e, 0xcc, 0xc2, 0xf1, 0xd7, 0x1b, 0xfa, 0xe1, 0x77, 0x79, 0x8f, 0xfb, 0xf5, - 0xf5, 0x7a, 0xcf, 0x3e, 0xd4, 0x8a, 0xed, 0xcb, 0x2c, 0x8c, 0xb3, 0xd1, 0xec, 0x10, 0x16, 0x6a, - 0x97, 0x63, 0x0b, 0xb5, 0xb3, 0x29, 0x15, 0x8e, 0xdd, 0x28, 0x71, 0x7d, 0x76, 0xad, 0x67, 0x7d, - 0x76, 0x6e, 0x57, 0xa4, 0xf4, 0x65, 0xd9, 0x3b, 0x30, 0x15, 0x0a, 0x44, 0x6f, 0x78, 0xbd, 0x2a, - 0x9f, 0x29, 0x33, 0xcc, 0xb6, 0xe1, 0x86, 0x25, 0x1c, 0x26, 0x43, 0x0a, 0x51, 0x83, 0x42, 0x44, - 0xc2, 0xde, 0x98, 0x3d, 0x6a, 0x3b, 0xba, 0x2e, 0x9e, 0xea, 0x52, 0xf7, 0xe7, 0x04, 0x65, 0xe9, - 0xc9, 0x8b, 0xd2, 0xd8, 0xd3, 0x17, 0xa5, 0xb1, 0xe7, 0x2f, 0x4a, 0x63, 0x5f, 0xb8, 0xa5, 0xcc, - 0x13, 0xb7, 0x94, 0x79, 0xea, 0x96, 0x32, 0xcf, 0xdd, 0x52, 0xe6, 0x17, 0xb7, 0x94, 0xf9, 0xfa, - 0xd7, 0xd2, 0xd8, 0xc7, 0xd9, 0xf6, 0xf2, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x83, 0x99, - 0xe1, 0x70, 0x1d, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_1972394c0c7aac8b) +} + +var fileDescriptor_1972394c0c7aac8b = []byte{ + // 1593 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0x13, 0xd7, + 0x16, 0x8e, 0x7f, 0x12, 0x92, 0xe3, 0x90, 0x9f, 0x0b, 0x0f, 0x92, 0xf0, 0xf0, 0x44, 0xf3, 0x10, + 0x0a, 0xef, 0x3d, 0xc6, 0x8d, 0x4b, 0x11, 0x5d, 0x55, 0xb1, 0x5b, 0x0a, 0x6a, 0x0c, 0xe1, 0x26, + 0x50, 0xfa, 0x2b, 0x6e, 0xc6, 0x17, 0x67, 0x88, 0x67, 0xc6, 0x9a, 0x19, 0x5b, 0x04, 0x09, 0xa9, + 0x5d, 0x74, 0xdf, 0x0d, 0xed, 0xb6, 0x95, 0xba, 0xed, 0x9a, 0x75, 0x77, 0x2c, 0x59, 0x20, 0x95, + 0x95, 0x55, 0xa6, 0x8b, 0x2e, 0xba, 0xea, 0x96, 0x55, 0x75, 0xef, 0xdc, 0x19, 0xcf, 0xd8, 0x9e, + 0x89, 0xe3, 0x84, 0xa8, 0xad, 0xd8, 0x65, 0x7c, 0xcf, 0xf9, 0xce, 0xbd, 0xe7, 0xff, 0x9c, 0xc0, + 0xb9, 0xed, 0x4b, 0xb6, 0xa2, 0x99, 0x05, 0xd2, 0xd0, 0x0a, 0xa4, 0xe9, 0x98, 
0xb6, 0x4a, 0xea, + 0x9a, 0x51, 0x2b, 0xb4, 0x96, 0x0b, 0x35, 0x6a, 0x50, 0x8b, 0x38, 0xb4, 0xaa, 0x34, 0x2c, 0xd3, + 0x31, 0xd1, 0xbc, 0x47, 0xaa, 0x90, 0x86, 0xa6, 0x84, 0x48, 0x95, 0xd6, 0xf2, 0xc2, 0xf9, 0x9a, + 0xe6, 0x6c, 0x35, 0x37, 0x15, 0xd5, 0xd4, 0x0b, 0x35, 0xb3, 0x66, 0x16, 0x38, 0xc7, 0x66, 0xf3, + 0x2e, 0xff, 0xe2, 0x1f, 0xfc, 0x2f, 0x0f, 0x69, 0x41, 0x0e, 0x09, 0x55, 0x4d, 0x8b, 0xf6, 0x91, + 0xb6, 0x70, 0xa1, 0x43, 0xa3, 0x13, 0x75, 0x4b, 0x33, 0xa8, 0xb5, 0x53, 0x68, 0x6c, 0xd7, 0x38, + 0x93, 0x45, 0x6d, 0xb3, 0x69, 0xa9, 0x74, 0x4f, 0x5c, 0x76, 0x41, 0xa7, 0x0e, 0xe9, 0x27, 0xab, + 0x10, 0xc7, 0x65, 0x35, 0x0d, 0x47, 0xd3, 0x7b, 0xc5, 0x5c, 0xdc, 0x8d, 0xc1, 0x56, 0xb7, 0xa8, + 0x4e, 0xba, 0xf9, 0xe4, 0xdf, 0xd2, 0x70, 0xba, 0x6c, 0x1a, 0x0e, 0x61, 0x1c, 0x58, 0x3c, 0xa2, + 0x42, 0x1d, 0x4b, 0x53, 0xd7, 0xf9, 0xdf, 0xa8, 0x0c, 0x59, 0x83, 0xe8, 0x74, 0x2e, 0xb5, 0x98, + 0x5a, 0x9a, 0x28, 0x15, 0x9e, 0xb4, 0xa5, 0x11, 0xb7, 0x2d, 0x65, 0xaf, 0x11, 0x9d, 0xbe, 0x6c, + 0x4b, 0x52, 0xaf, 0xe2, 0x14, 0x1f, 0x86, 0x91, 0x60, 0xce, 0x8c, 0x6e, 0xc3, 0x9c, 0x43, 0xac, + 0x1a, 0x75, 0x56, 0x5a, 0xd4, 0x22, 0x35, 0x7a, 0xd3, 0xd1, 0xea, 0xda, 0x03, 0xe2, 0x68, 0xa6, + 0x31, 0x97, 0x5e, 0x4c, 0x2d, 0x8d, 0x96, 0xfe, 0xed, 0xb6, 0xa5, 0xb9, 0x8d, 0x18, 0x1a, 0x1c, + 0xcb, 0x8d, 0x5a, 0x80, 0x22, 0x67, 0xb7, 0x48, 0xbd, 0x49, 0xe7, 0x32, 0x8b, 0xa9, 0xa5, 0x5c, + 0x51, 0x51, 0x3a, 0x0e, 0x12, 0x68, 0x45, 0x69, 0x6c, 0xd7, 0xb8, 0xc7, 0xf8, 0x26, 0x53, 0x6e, + 0x34, 0x89, 0xe1, 0x68, 0xce, 0x4e, 0xe9, 0x84, 0xdb, 0x96, 0xd0, 0x46, 0x0f, 0x1a, 0xee, 0x23, + 0x01, 0x15, 0x60, 0x42, 0xf5, 0xf5, 0x36, 0x37, 0xca, 0x75, 0x33, 0x2b, 0x74, 0x33, 0xd1, 0x51, + 0x68, 0x87, 0x46, 0xfe, 0x23, 0x41, 0xd3, 0x0e, 0x71, 0x9a, 0xf6, 0xc1, 0x68, 0xfa, 0x13, 0x98, + 0x57, 0x9b, 0x96, 0x45, 0x8d, 0x78, 0x55, 0x9f, 0x76, 0xdb, 0xd2, 0x7c, 0x39, 0x8e, 0x08, 0xc7, + 0xf3, 0xa3, 0x87, 0x70, 0x2c, 0x7a, 0xb8, 0x1f, 0x6d, 0x9f, 0x12, 0x0f, 0x3c, 0x56, 0xee, 0x85, + 0xc4, 0xfd, 0xe4, 0x44, 0x75, 0x9e, 0x1d, 0x40, 0xe7, 0x8f, 0x52, 0x70, 0xaa, 0x6c, 0x99, 0xb6, + 0x7d, 0x8b, 0x5a, 0xb6, 0x66, 0x1a, 0xd7, 0x37, 0xef, 0x51, 0xd5, 0xc1, 0xf4, 0x2e, 0xb5, 0xa8, + 0xa1, 0x52, 0xb4, 0x08, 0xd9, 0x6d, 0xcd, 0xa8, 0x0a, 0x8d, 0x4f, 0xfa, 0x1a, 0xff, 0x40, 0x33, + 0xaa, 0x98, 0x9f, 0x30, 0x0a, 0x6e, 0x93, 0x74, 0x94, 0x22, 0xa4, 0xf0, 0x22, 0x00, 0x69, 0x68, + 0x42, 0x00, 0x57, 0xc5, 0x44, 0x09, 0x09, 0x3a, 0x58, 0x59, 0xbb, 0x2a, 0x4e, 0x70, 0x88, 0x4a, + 0xfe, 0x26, 0x03, 0xc7, 0xdf, 0xbb, 0xef, 0x50, 0xcb, 0x20, 0xf5, 0x48, 0xb0, 0x15, 0x01, 0x74, + 0xfe, 0x7d, 0xad, 0xe3, 0x08, 0x01, 0x58, 0x25, 0x38, 0xc1, 0x21, 0x2a, 0x64, 0xc2, 0x94, 0xf7, + 0xb5, 0x4e, 0xeb, 0x54, 0x75, 0x4c, 0x8b, 0x5f, 0x36, 0x57, 0x7c, 0x33, 0xc9, 0x1e, 0xb6, 0xc2, + 0x52, 0x8f, 0xd2, 0x5a, 0x56, 0x56, 0xc9, 0x26, 0xad, 0xfb, 0xac, 0x25, 0xe4, 0xb6, 0xa5, 0xa9, + 0x4a, 0x04, 0x0e, 0x77, 0xc1, 0x23, 0x02, 0x39, 0x2f, 0x20, 0xf6, 0x63, 0xfd, 0x69, 0xb7, 0x2d, + 0xe5, 0x36, 0x3a, 0x30, 0x38, 0x8c, 0x19, 0x13, 0xd5, 0xd9, 0x57, 0x1d, 0xd5, 0xf2, 0x77, 0xbd, + 0x86, 0xf1, 0x62, 0xf3, 0x6f, 0x61, 0x98, 0x2d, 0x98, 0x14, 0x61, 0xb3, 0x1f, 0xcb, 0x1c, 0x17, + 0xcf, 0x9a, 0x2c, 0x87, 0xb0, 0x70, 0x04, 0x19, 0xed, 0xf4, 0x4f, 0x04, 0xc3, 0x19, 0xe8, 0xe4, + 0x5e, 0x92, 0x80, 0xfc, 0x38, 0x0d, 0x27, 0xaf, 0x98, 0x96, 0xf6, 0x80, 0x45, 0x79, 0x7d, 0xcd, + 0xac, 0xae, 0x88, 0xca, 0x4f, 0x2d, 0x74, 0x07, 0xc6, 0x99, 0xf6, 0xaa, 0xc4, 0x21, 0xdc, 0x46, + 0xb9, 0xe2, 0x1b, 0x83, 0xe9, 0xda, 0x4b, 0x0c, 0x15, 0xea, 0x90, 0x8e, 0x55, 0x3b, 0xbf, 0xe1, + 0x00, 
0x15, 0xdd, 0x86, 0xac, 0xdd, 0xa0, 0xaa, 0xb0, 0xe4, 0x45, 0x25, 0xb6, 0x03, 0x51, 0x62, + 0xee, 0xb8, 0xde, 0xa0, 0x6a, 0x27, 0x8f, 0xb0, 0x2f, 0xcc, 0x11, 0xd1, 0x1d, 0x18, 0xb3, 0xb9, + 0xaf, 0x09, 0xb3, 0x5d, 0x1a, 0x02, 0x9b, 0xf3, 0x97, 0xa6, 0x04, 0xfa, 0x98, 0xf7, 0x8d, 0x05, + 0xae, 0xfc, 0x55, 0x06, 0x16, 0x63, 0x38, 0xcb, 0xa6, 0x51, 0xd5, 0x78, 0x8a, 0xbf, 0x02, 0x59, + 0x67, 0xa7, 0xe1, 0xbb, 0xf8, 0x05, 0xff, 0xa2, 0x1b, 0x3b, 0x0d, 0x56, 0x84, 0xce, 0xec, 0xc6, + 0xcf, 0xe8, 0x30, 0x47, 0x40, 0xab, 0xc1, 0x83, 0xd2, 0x11, 0x2c, 0x71, 0xad, 0x97, 0x6d, 0xa9, + 0x4f, 0xd7, 0xa5, 0x04, 0x48, 0xd1, 0xcb, 0xb3, 0x8c, 0x50, 0x27, 0xb6, 0xb3, 0x61, 0x11, 0xc3, + 0xf6, 0x24, 0x69, 0xba, 0xef, 0xe1, 0xff, 0x1d, 0xcc, 0xc8, 0x8c, 0xa3, 0xb4, 0x20, 0x6e, 0x81, + 0x56, 0x7b, 0xd0, 0x70, 0x1f, 0x09, 0xe8, 0x2c, 0x8c, 0x59, 0x94, 0xd8, 0xa6, 0x21, 0x0a, 0x4e, + 0xa0, 0x5c, 0xcc, 0x7f, 0xc5, 0xe2, 0x14, 0x9d, 0x83, 0x23, 0x3a, 0xb5, 0x6d, 0x52, 0xa3, 0xa2, + 0x1b, 0x98, 0x16, 0x84, 0x47, 0x2a, 0xde, 0xcf, 0xd8, 0x3f, 0x97, 0x9f, 0xa5, 0xe0, 0x54, 0x8c, + 0x1e, 0x57, 0x35, 0xdb, 0x41, 0x9f, 0xf6, 0x78, 0xb1, 0x32, 0x60, 0xc6, 0xd0, 0x6c, 0xcf, 0x87, + 0x67, 0x84, 0xec, 0x71, 0xff, 0x97, 0x90, 0x07, 0x7f, 0x08, 0xa3, 0x9a, 0x43, 0x75, 0x66, 0x95, + 0xcc, 0x52, 0xae, 0x58, 0xdc, 0xbb, 0x9b, 0x95, 0x8e, 0x0a, 0xf8, 0xd1, 0xab, 0x0c, 0x08, 0x7b, + 0x78, 0xf2, 0xef, 0xe9, 0xd8, 0x67, 0x31, 0x37, 0x47, 0x2d, 0x98, 0xe2, 0x5f, 0x5e, 0x2a, 0xc6, + 0xf4, 0xae, 0x78, 0x5c, 0x52, 0x10, 0x25, 0x14, 0xef, 0xd2, 0x09, 0x71, 0x8b, 0xa9, 0xf5, 0x08, + 0x2a, 0xee, 0x92, 0x82, 0x96, 0x21, 0xa7, 0x6b, 0x06, 0xa6, 0x8d, 0xba, 0xa6, 0x12, 0x5b, 0xf4, + 0x40, 0xbc, 0xfc, 0x54, 0x3a, 0x3f, 0xe3, 0x30, 0x0d, 0x7a, 0x0b, 0x72, 0x3a, 0xb9, 0x1f, 0xb0, + 0x64, 0x38, 0xcb, 0x31, 0x21, 0x2f, 0x57, 0xe9, 0x1c, 0xe1, 0x30, 0x1d, 0xba, 0x07, 0x79, 0xaf, + 0xa6, 0x94, 0xd7, 0x6e, 0x86, 0xda, 0xa6, 0x35, 0x6a, 0xa9, 0xd4, 0x70, 0x98, 0x6b, 0x64, 0x39, + 0x92, 0xec, 0xb6, 0xa5, 0xfc, 0x46, 0x22, 0x25, 0xde, 0x05, 0x49, 0xfe, 0x29, 0x03, 0xa7, 0x13, + 0xd3, 0x00, 0xba, 0x0c, 0xc8, 0xdc, 0xb4, 0xa9, 0xd5, 0xa2, 0xd5, 0xf7, 0xbd, 0xae, 0x9f, 0x35, + 0x28, 0x4c, 0xe7, 0x19, 0xaf, 0x26, 0x5e, 0xef, 0x39, 0xc5, 0x7d, 0x38, 0x90, 0x0a, 0x47, 0x59, + 0x5c, 0x78, 0x5a, 0xd6, 0x44, 0x2f, 0xb4, 0xb7, 0xa0, 0x9b, 0x75, 0xdb, 0xd2, 0xd1, 0xd5, 0x30, + 0x08, 0x8e, 0x62, 0xa2, 0x15, 0x98, 0x16, 0xc9, 0xbe, 0x4b, 0xeb, 0x27, 0x85, 0xd6, 0xa7, 0xcb, + 0xd1, 0x63, 0xdc, 0x4d, 0xcf, 0x20, 0xaa, 0xd4, 0xd6, 0x2c, 0x5a, 0x0d, 0x20, 0xb2, 0x51, 0x88, + 0x77, 0xa3, 0xc7, 0xb8, 0x9b, 0x1e, 0xe9, 0x20, 0x09, 0xd4, 0x58, 0x0b, 0x8e, 0x72, 0xc8, 0xff, + 0xb8, 0x6d, 0x49, 0x2a, 0x27, 0x93, 0xe2, 0xdd, 0xb0, 0xe4, 0x47, 0x59, 0x10, 0xbd, 0x03, 0x0f, + 0x90, 0x0b, 0x91, 0xd4, 0xbb, 0xd8, 0x95, 0x7a, 0x67, 0xc2, 0x8d, 0x62, 0x28, 0xcd, 0xde, 0x80, + 0x31, 0x93, 0x47, 0x86, 0xb0, 0xcb, 0xf9, 0x84, 0x70, 0x0a, 0x4a, 0x5a, 0x00, 0x54, 0x02, 0x96, + 0xcb, 0x44, 0x68, 0x09, 0x20, 0x74, 0x15, 0xb2, 0x0d, 0xb3, 0xea, 0x17, 0xa2, 0xff, 0x25, 0x00, + 0xae, 0x99, 0x55, 0x3b, 0x02, 0x37, 0xce, 0x6e, 0xcc, 0x7e, 0xc5, 0x1c, 0x02, 0x7d, 0x04, 0xe3, + 0x7e, 0xc1, 0x17, 0xdd, 0x41, 0x21, 0x01, 0xae, 0xdf, 0x00, 0x5a, 0x9a, 0x64, 0x89, 0xcc, 0x3f, + 0xc1, 0x01, 0x1c, 0x7a, 0x08, 0xb3, 0x6a, 0xf7, 0x3c, 0x35, 0x77, 0x64, 0xd7, 0xda, 0x99, 0x38, + 0xed, 0x96, 0xfe, 0xe5, 0xb6, 0xa5, 0xd9, 0x1e, 0x12, 0xdc, 0x2b, 0x89, 0xbd, 0x8c, 0x8a, 0x4e, + 0x91, 0x3b, 0x45, 0xf2, 0xcb, 0xfa, 0x75, 0xfb, 0xde, 0xcb, 0xfc, 0x13, 0x1c, 0xc0, 0xc9, 0xdf, + 0x66, 0x61, 0x32, 0xd2, 0x7d, 
0x1e, 0xb2, 0x67, 0x78, 0x6d, 0xc4, 0x81, 0x79, 0x86, 0x07, 0x77, + 0xa0, 0x9e, 0xe1, 0x41, 0x1e, 0x92, 0x67, 0x78, 0xc2, 0x0e, 0xc9, 0x33, 0x42, 0x2f, 0xeb, 0xe3, + 0x19, 0xcf, 0x32, 0x80, 0x7a, 0x83, 0x18, 0x7d, 0x0e, 0x63, 0x5e, 0xb9, 0xd8, 0x67, 0x49, 0x0d, + 0x9a, 0x1b, 0x51, 0x3d, 0x05, 0x6a, 0xd7, 0xf4, 0x93, 0x1e, 0x68, 0xfa, 0xa1, 0x07, 0x31, 0x25, + 0x06, 0x35, 0x37, 0x76, 0x52, 0xfc, 0x0c, 0xc6, 0x6d, 0x7f, 0xbc, 0xca, 0x0e, 0x3f, 0x5e, 0x71, + 0x85, 0x07, 0x83, 0x55, 0x00, 0x89, 0xaa, 0x30, 0x49, 0xc2, 0x13, 0xce, 0xe8, 0x50, 0xcf, 0x98, + 0x61, 0xe3, 0x54, 0x64, 0xb4, 0x89, 0xa0, 0xca, 0x3f, 0x77, 0x9b, 0xd5, 0x0b, 0xfb, 0xbf, 0xa2, + 0x59, 0x0f, 0x6f, 0xc6, 0xfc, 0x47, 0x58, 0xf6, 0xfb, 0x34, 0xcc, 0x74, 0x17, 0xc9, 0xa1, 0x96, + 0x09, 0x0f, 0xfa, 0x6e, 0x44, 0xd2, 0x43, 0x5d, 0x3a, 0x98, 0x81, 0x06, 0xdc, 0x75, 0x86, 0x2d, + 0x91, 0x39, 0x70, 0x4b, 0xc8, 0x3f, 0x44, 0x75, 0x34, 0xfc, 0xc2, 0x25, 0x66, 0x3d, 0x99, 0x3e, + 0xa4, 0xf5, 0xe4, 0x2b, 0x56, 0xd3, 0x8f, 0x69, 0x38, 0xfe, 0x7a, 0x43, 0x3f, 0xf8, 0x2e, 0xef, + 0x71, 0xaf, 0xbe, 0x5e, 0xef, 0xd9, 0x07, 0x5a, 0xb1, 0x7d, 0x99, 0x86, 0x51, 0x3e, 0x9a, 0x1d, + 0xc2, 0x42, 0xed, 0x72, 0x64, 0xa1, 0x76, 0x26, 0xa1, 0xc2, 0xf1, 0x1b, 0xc5, 0xae, 0xcf, 0xae, + 0x75, 0xad, 0xcf, 0xce, 0xee, 0x8a, 0x94, 0xbc, 0x2c, 0x7b, 0x1b, 0x26, 0x02, 0x81, 0xe8, 0xff, + 0xac, 0x57, 0x15, 0x33, 0x65, 0x8a, 0xdb, 0x36, 0xd8, 0xb0, 0x04, 0xc3, 0x64, 0x40, 0x21, 0x6b, + 0x90, 0x0b, 0x49, 0xd8, 0x1b, 0x33, 0xa3, 0xb6, 0xc3, 0xeb, 0xe2, 0x89, 0x0e, 0x75, 0x6f, 0x4e, + 0x28, 0xbd, 0xf3, 0xe4, 0x45, 0x7e, 0xe4, 0xe9, 0x8b, 0xfc, 0xc8, 0xf3, 0x17, 0xf9, 0x91, 0x2f, + 0xdc, 0x7c, 0xea, 0x89, 0x9b, 0x4f, 0x3d, 0x75, 0xf3, 0xa9, 0xe7, 0x6e, 0x3e, 0xf5, 0x8b, 0x9b, + 0x4f, 0x7d, 0xfd, 0x6b, 0x7e, 0xe4, 0xe3, 0xf9, 0xd8, 0x7f, 0xa9, 0xfe, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd7, 0x67, 0xd4, 0x08, 0x6e, 0x1d, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 606a50da5..0a961312f 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/autoscaling/v1"; // ContainerResourceMetricSource indicates how to scale on a resource metric known to // Kubernetes, as specified in the requests and limits, describing a single container in @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target. optional string container = 5; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling taget optional string container = 4; @@ -87,13 +87,13 @@ message ContainerResourceMetricStatus { // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,27 +131,27 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; - // current information about the autoscaler. + // status is the current information about the autoscaler. // +optional optional HorizontalPodAutoscalerStatus status = 3; } @@ -168,7 +168,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -184,9 +184,9 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; } @@ -204,10 +204,10 @@ message HorizontalPodAutoscalerSpec { // +optional optional int32 minReplicas = 2; - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. optional int32 maxReplicas = 3; - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional optional int32 targetCPUUtilizationPercentage = 4; @@ -215,22 +215,22 @@ message HorizontalPodAutoscalerSpec { // current status of a horizontal pod autoscaler message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional optional int64 observedGeneration = 1; - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. optional int32 desiredReplicas = 4; - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. 
// +optional optional int32 currentCPUUtilizationPercentage = 5; @@ -264,7 +264,7 @@ message MetricSpec { // +optional optional ResourceMetricSource resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -309,7 +309,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -336,18 +336,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric. // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -360,18 +360,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -384,13 +384,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -401,13 +401,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -431,7 +431,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -455,40 +455,40 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } // ScaleSpec describes the attributes of a scale subresource. message ScaleSpec { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional optional int32 replicas = 1; } // ScaleStatus represents the current status of a scale subresource. message ScaleStatus { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional optional string selector = 2; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 8e0a46525..b31425b3b 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -25,11 +25,13 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. // +structType=atomic type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -46,9 +48,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. 
+ + // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + + // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; // if not specified the default autoscaling policy will be used. // +optional TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` @@ -56,22 +60,22 @@ type HorizontalPodAutoscalerSpec struct { // current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. + // observedGeneration is the most recent generation observed by this autoscaler. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - // last time the HorizontalPodAutoscaler scaled the number of pods; + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - // current number of replicas of pods managed by this autoscaler. + // currentReplicas is the current number of replicas of pods managed by this autoscaler. CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - // desired number of replicas of pods managed by this autoscaler. + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler. DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, // e.g. 70 means that an average pod is using now 70% of its requested CPU. // +optional CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` @@ -79,6 +83,7 @@ type HorizontalPodAutoscalerStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { @@ -87,16 +92,17 @@ type HorizontalPodAutoscaler struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current information about the autoscaler. + // status is the current information about the autoscaler. 
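The rewritten field comments above spell out the autoscaling/v1 HorizontalPodAutoscaler spec fields minReplicas, maxReplicas, and targetCPUUtilizationPercentage. A minimal sketch of an object wired together from those fields — the scaleTargetRef, Deployment name, namespace, and numbers are illustrative assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	// Hypothetical HPA: scale Deployment "web" between 2 and 10 replicas,
	// targeting 70% average CPU utilization across its pods.
	hpa := autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       "web",
			},
			MinReplicas:                    int32Ptr(2),
			MaxReplicas:                    10,
			TargetCPUUtilizationPercentage: int32Ptr(70),
		},
	}
	out, _ := json.MarshalIndent(hpa, "", "  ")
	fmt.Println(string(out))
}
```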
// +optional Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { @@ -105,11 +111,12 @@ type HorizontalPodAutoscalerList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // list of horizontal pod autoscaler objects. + // items is the list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Scale represents a scaling request for a resource. type Scale struct { @@ -118,31 +125,31 @@ type Scale struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ScaleSpec describes the attributes of a scale subresource. type ScaleSpec struct { - // desired number of instances for the scaled object. + // replicas is the desired number of instances for the scaled object. // +optional Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` } // ScaleStatus represents the current status of a scale subresource. type ScaleStatus struct { - // actual number of observed instances of the scaled object. + // replicas is the actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. This is same + // selector is the label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } @@ -194,11 +201,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. 
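The Scale, ScaleSpec, and ScaleStatus comment rewrites above describe the scale subresource served by workload /scale endpoints. A sketch, assuming a reachable cluster, a kubeconfig in the default location, and a Deployment "web" in "default" (all illustrative), of reading and writing that subresource through client-go:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed environment: kubeconfig at $HOME/.kube/config and a
	// Deployment named "web" in namespace "default".
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// GetScale returns an autoscaling/v1 Scale; status.replicas and
	// status.selector are the fields whose comments are rewritten above.
	scale, err := cs.AppsV1().Deployments("default").GetScale(context.TODO(), "web", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("replicas=%d selector=%q\n", scale.Status.Replicas, scale.Status.Selector)

	// Writing spec.replicas through UpdateScale is the same path `kubectl scale` uses.
	scale.Spec.Replicas = 3
	if _, err := cs.AppsV1().Deployments("default").UpdateScale(context.TODO(), "web", scale, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
}
```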
// +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -206,7 +215,8 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod of the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -214,6 +224,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -231,6 +242,7 @@ type ObjectMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` @@ -239,6 +251,7 @@ type ObjectMetricSource struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -252,6 +265,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` @@ -273,11 +287,13 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -295,16 +311,19 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` + // container is the name of the container in the pods of the scaling target. Container string `json:"container" protobuf:"bytes,5,opt,name=container"` } @@ -315,14 +334,17 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional @@ -341,11 +363,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -353,13 +377,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. 
It allows autoscaling based on information // coming from components running outside of cluster @@ -390,15 +416,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -413,6 +443,7 @@ type ObjectMetricStatus struct { // metricName is the name of the metric in question. MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` @@ -421,6 +452,7 @@ type ObjectMetricStatus struct { // When unset, just the metricName will be used to gather metrics. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional @@ -432,6 +464,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metricName is the name of the metric in question MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` @@ -451,6 +484,7 @@ type PodsMetricStatus struct { type ResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -458,6 +492,7 @@ type ResourceMetricStatus struct { // specification. // +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. @@ -473,6 +508,7 @@ type ResourceMetricStatus struct { type ContainerResourceMetricStatus struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. It will only be @@ -480,11 +516,13 @@ type ContainerResourceMetricStatus struct { // specification. 
// +optional CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` + // container is the name of the container in the pods of the scaling taget Container string `json:"container" protobuf:"bytes,4,opt,name=container"` } @@ -495,12 +533,14 @@ type ExternalMetricStatus struct { // metricName is the name of a metric used for autoscaling in // metric system. MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series // within a given metric. // +optional MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` // currentValue is the current value of the metric (as a quantity) CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 192dc5f39..37c2b36a5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -53,9 +53,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -89,8 +89,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", + "spec": "spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", } func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { @@ -113,7 +113,7 @@ func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerList = map[string]string{ "": "list of horizontal pod autoscaler objects.", "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", + "items": "items is the list of horizontal pod autoscaler objects.", } func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { @@ -124,8 +124,8 @@ var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", + "maxReplicas": "maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { @@ -134,11 +134,11 @@ func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerStatus = map[string]string{ "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 
70 means that an average pod is using now 70% of its requested CPU.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is the current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", } func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { @@ -151,7 +151,7 @@ var map_MetricSpec = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -165,7 +165,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -246,8 +246,8 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "spec": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -256,7 +256,7 @@ func (Scale) SwaggerDoc() map[string]string { var map_ScaleSpec = map[string]string{ "": "ScaleSpec describes the attributes of a scale subresource.", - "replicas": "desired number of instances for the scaled object.", + "replicas": "replicas is the desired number of instances for the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -265,8 +265,8 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "replicas": "replicas is the actual number of observed instances of the scaled object.", + "selector": "selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. 
The string will be in the same format as the query-param syntax. More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..36d86a5ec --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Scale) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/autoscaling/v2/doc.go b/vendor/k8s.io/api/autoscaling/v2/doc.go index f96a059b6..aafa2d4de 100644 --- a/vendor/k8s.io/api/autoscaling/v2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v2 // import "k8s.io/api/autoscaling/v2" diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go index 408413a78..ece6dedad 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
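The new prerelease-lifecycle helpers above only expose the introduced-in release recorded by the +k8s:prerelease-lifecycle-gen:introduced=1.2 tags. A minimal, illustrative sketch of calling them from consumer code (the main package and the print statement are assumptions for illustration, not part of this vendored diff):

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
)

func main() {
	// APILifecycleIntroduced is the generated method shown above; for the
	// autoscaling/v1 HorizontalPodAutoscaler it returns (1, 2).
	hpa := &autoscalingv1.HorizontalPodAutoscaler{}
	major, minor := hpa.APILifecycleIntroduced()
	fmt.Printf("autoscaling/v1 HorizontalPodAutoscaler introduced in %d.%d\n", major, minor)
}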
-// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto +// source: k8s.io/api/autoscaling/v2/generated.proto package v2 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{0} + return fileDescriptor_4d5f2c8767749221, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{1} + return fileDescriptor_4d5f2c8767749221, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{2} + return fileDescriptor_4d5f2c8767749221, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{3} + return fileDescriptor_4d5f2c8767749221, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{4} + return fileDescriptor_4d5f2c8767749221, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } func (*HPAScalingPolicy) ProtoMessage() {} func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{5} + return fileDescriptor_4d5f2c8767749221, []int{5} } func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } func (*HPAScalingRules) ProtoMessage() {} func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{6} + return fileDescriptor_4d5f2c8767749221, []int{6} } func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() 
{ *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{7} + return fileDescriptor_4d5f2c8767749221, []int{7} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{8} + return fileDescriptor_4d5f2c8767749221, []int{8} } func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{9} + return fileDescriptor_4d5f2c8767749221, []int{9} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{10} + return fileDescriptor_4d5f2c8767749221, []int{10} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{11} + return fileDescriptor_4d5f2c8767749221, []int{11} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{12} + return fileDescriptor_4d5f2c8767749221, []int{12} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{13} + return fileDescriptor_4d5f2c8767749221, []int{13} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - 
return fileDescriptor_b14d4df4b5f3935e, []int{14} + return fileDescriptor_4d5f2c8767749221, []int{14} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{15} + return fileDescriptor_4d5f2c8767749221, []int{15} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{16} + return fileDescriptor_4d5f2c8767749221, []int{16} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{17} + return fileDescriptor_4d5f2c8767749221, []int{17} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +554,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{18} + return fileDescriptor_4d5f2c8767749221, []int{18} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +582,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{19} + return fileDescriptor_4d5f2c8767749221, []int{19} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +610,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{20} + return fileDescriptor_4d5f2c8767749221, []int{20} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +638,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{21} + return fileDescriptor_4d5f2c8767749221, []int{21} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -666,7 +666,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{22} + return fileDescriptor_4d5f2c8767749221, []int{22} } func (m 
*ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -694,7 +694,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_b14d4df4b5f3935e, []int{23} + return fileDescriptor_4d5f2c8767749221, []int{23} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -747,120 +747,119 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2/generated.proto", fileDescriptor_b14d4df4b5f3935e) -} - -var fileDescriptor_b14d4df4b5f3935e = []byte{ - // 1735 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x1b, 0xc7, - 0x19, 0xd7, 0x92, 0xd4, 0x6b, 0xa8, 0xe7, 0xf8, 0xc5, 0xca, 0x30, 0x29, 0x6c, 0xdd, 0xda, 0x75, - 0xeb, 0x65, 0xcd, 0xba, 0x86, 0x51, 0x9f, 0xb4, 0x72, 0x5b, 0x0b, 0x96, 0x2a, 0x7a, 0x68, 0x5b, - 0x6d, 0xd1, 0x16, 0x1e, 0xee, 0x8e, 0xa8, 0xa9, 0xc8, 0x5d, 0x62, 0x77, 0x49, 0x5b, 0x06, 0x0a, - 0xf4, 0xd2, 0x7b, 0xd1, 0xc2, 0x28, 0xfa, 0x3f, 0x18, 0x39, 0x25, 0x70, 0x0e, 0x09, 0x10, 0x20, - 0x39, 0xf8, 0x12, 0xc0, 0x87, 0x1c, 0x7c, 0x22, 0x62, 0x06, 0xc8, 0x31, 0x7f, 0x80, 0x4f, 0xc1, - 0x3c, 0xf6, 0xc9, 0x97, 0xe8, 0xc8, 0x02, 0x74, 0xe3, 0xcc, 0x7c, 0xdf, 0xef, 0x7b, 0xcc, 0xf7, - 0x9a, 0x25, 0xd0, 0xf7, 0x6f, 0xba, 0x1a, 0xb5, 0x8b, 0xfb, 0xad, 0x2a, 0x71, 0x2c, 0xe2, 0x11, - 0xb7, 0xd8, 0x26, 0x96, 0x69, 0x3b, 0x45, 0x79, 0x80, 0x9b, 0xb4, 0x88, 0x5b, 0x9e, 0xed, 0x1a, - 0xb8, 0x4e, 0xad, 0x5a, 0xb1, 0x5d, 0x2a, 0xd6, 0x88, 0x45, 0x1c, 0xec, 0x11, 0x53, 0x6b, 0x3a, - 0xb6, 0x67, 0xc3, 0x1f, 0x09, 0x52, 0x0d, 0x37, 0xa9, 0x16, 0x21, 0xd5, 0xda, 0xa5, 0x95, 0xab, - 0x35, 0xea, 0xed, 0xb5, 0xaa, 0x9a, 0x61, 0x37, 0x8a, 0x35, 0xbb, 0x66, 0x17, 0x39, 0x47, 0xb5, - 0xb5, 0xcb, 0x57, 0x7c, 0xc1, 0x7f, 0x09, 0xa4, 0x15, 0x35, 0x22, 0xd4, 0xb0, 0x1d, 0x52, 0x6c, - 0x5f, 0x4b, 0x4a, 0x5b, 0xb9, 0x1e, 0xd2, 0x34, 0xb0, 0xb1, 0x47, 0x2d, 0xe2, 0x1c, 0x14, 0x9b, - 0xfb, 0x35, 0xce, 0xe4, 0x10, 0xd7, 0x6e, 0x39, 0x06, 0x19, 0x8b, 0xcb, 0x2d, 0x36, 0x88, 0x87, - 0xfb, 0xc9, 0x2a, 0x0e, 0xe2, 0x72, 0x5a, 0x96, 0x47, 0x1b, 0xbd, 0x62, 0x6e, 0x8c, 0x62, 0x70, - 0x8d, 0x3d, 0xd2, 0xc0, 0x49, 0x3e, 0xf5, 0x5b, 0x05, 0x5c, 0x58, 0xb7, 0x2d, 0x0f, 0x33, 0x0e, - 0x24, 0x8d, 0xd8, 0x22, 0x9e, 0x43, 0x8d, 0x0a, 0xff, 0x0d, 0xd7, 0x41, 0xc6, 0xc2, 0x0d, 0x92, - 0x53, 0x56, 0x95, 0xcb, 0xb3, 0x7a, 0xf1, 0x65, 0xa7, 0x30, 0xd1, 0xed, 0x14, 0x32, 0x7f, 0xc0, - 0x0d, 0xf2, 0xb6, 0x53, 0x28, 0xf4, 0x3a, 0x4e, 0xf3, 0x61, 0x18, 0x09, 0xe2, 0xcc, 0x70, 0x1b, - 0x4c, 0x79, 0xd8, 0xa9, 0x11, 0x2f, 0x97, 0x5a, 0x55, 0x2e, 0x67, 0x4b, 0x97, 0xb4, 0x81, 0x57, - 0xa7, 0x09, 0xe9, 0xf7, 0x39, 0xb9, 0xbe, 0x20, 0xe5, 0x4d, 0x89, 0x35, 0x92, 0x30, 0xb0, 0x08, - 0x66, 0x0d, 0x5f, 0xed, 0x5c, 0x9a, 0xab, 0xb6, 0x2c, 0x49, 0x67, 0x43, 0x7b, 0x42, 0x1a, 0xf5, - 0xbb, 0x21, 0x86, 0x7a, 0xd8, 0x6b, 0xb9, 0x47, 0x63, 0xe8, 0x0e, 0x98, 0x36, 0x5a, 0x8e, 0x43, - 0x2c, 0xdf, 0xd2, 0x5f, 0x8c, 0xb4, 0xf4, 0x21, 0xae, 0xb7, 0x88, 0xd0, 0x41, 0x5f, 0x94, 0x52, - 0xa7, 0xd7, 0x05, 0x08, 0xf2, 0xd1, 0xc6, 0x37, 0xf8, 0x99, 0x02, 0xce, 0xaf, 0x3b, 0xb6, 0xeb, - 0x3e, 0x24, 0x8e, 0x4b, 0x6d, 0x6b, 0xbb, 0xfa, 0x77, 0x62, 0x78, 0x88, 0xec, 0x12, 0x87, 0x58, - 0x06, 0x81, 0xab, 0x20, 0xb3, 0x4f, 0x2d, 0x53, 0x9a, 0x3b, 0xe7, 0x9b, 0x7b, 0x97, 0x5a, 
0x26, - 0xe2, 0x27, 0x8c, 0x82, 0x3b, 0x24, 0x15, 0xa7, 0x88, 0x58, 0x5b, 0x02, 0x00, 0x37, 0xa9, 0x14, - 0x20, 0xb5, 0x82, 0x92, 0x0e, 0xac, 0x95, 0x37, 0xe4, 0x09, 0x8a, 0x50, 0xa9, 0x9f, 0x28, 0xe0, - 0xf4, 0x6f, 0x9f, 0x78, 0xc4, 0xb1, 0x70, 0x3d, 0x16, 0x68, 0x15, 0x30, 0xd5, 0xe0, 0x6b, 0xae, - 0x52, 0xb6, 0xf4, 0xf3, 0x91, 0x9e, 0xdb, 0x30, 0x89, 0xe5, 0xd1, 0x5d, 0x4a, 0x9c, 0x30, 0x4e, - 0xc4, 0x09, 0x92, 0x50, 0x47, 0x1e, 0x78, 0xea, 0x17, 0xbd, 0xea, 0x8b, 0xf0, 0x79, 0x2f, 0xea, - 0xbf, 0xaf, 0x70, 0x52, 0x3f, 0x50, 0xc0, 0xd2, 0x9d, 0xf2, 0x5a, 0x45, 0x70, 0x97, 0xed, 0x3a, - 0x35, 0x0e, 0xe0, 0x4d, 0x90, 0xf1, 0x0e, 0x9a, 0x7e, 0x06, 0x5c, 0xf4, 0x2f, 0xfc, 0xfe, 0x41, - 0x93, 0x65, 0xc0, 0xe9, 0x24, 0x3d, 0xdb, 0x47, 0x9c, 0x03, 0xfe, 0x18, 0x4c, 0xb6, 0x99, 0x5c, - 0xae, 0xe5, 0xa4, 0x3e, 0x2f, 0x59, 0x27, 0xb9, 0x32, 0x48, 0x9c, 0xc1, 0x5b, 0x60, 0xbe, 0x49, - 0x1c, 0x6a, 0x9b, 0x15, 0x62, 0xd8, 0x96, 0xe9, 0xf2, 0x80, 0x99, 0xd4, 0xcf, 0x48, 0xe2, 0xf9, - 0x72, 0xf4, 0x10, 0xc5, 0x69, 0xd5, 0xff, 0xa7, 0xc0, 0x62, 0xa8, 0x00, 0x6a, 0xd5, 0x89, 0x0b, - 0xff, 0x06, 0x56, 0x5c, 0x0f, 0x57, 0x69, 0x9d, 0x3e, 0xc5, 0x1e, 0xb5, 0xad, 0x1d, 0x6a, 0x99, - 0xf6, 0xe3, 0x38, 0x7a, 0xbe, 0xdb, 0x29, 0xac, 0x54, 0x06, 0x52, 0xa1, 0x21, 0x08, 0xf0, 0x2e, - 0x98, 0x73, 0x49, 0x9d, 0x18, 0x9e, 0xb0, 0x57, 0xfa, 0xe5, 0x52, 0xb7, 0x53, 0x98, 0xab, 0x44, - 0xf6, 0xdf, 0x76, 0x0a, 0xa7, 0x62, 0x8e, 0x11, 0x87, 0x28, 0xc6, 0x0c, 0xff, 0x04, 0x66, 0x9a, - 0xec, 0x17, 0x25, 0x6e, 0x2e, 0xb5, 0x9a, 0x1e, 0x11, 0x21, 0x49, 0x5f, 0xeb, 0x4b, 0xd2, 0x4b, - 0x33, 0x65, 0x09, 0x82, 0x02, 0x38, 0xf5, 0x45, 0x0a, 0x9c, 0xbb, 0x63, 0x3b, 0xf4, 0x29, 0x4b, - 0xfe, 0x7a, 0xd9, 0x36, 0xd7, 0x24, 0x18, 0x71, 0xe0, 0x23, 0x30, 0xc3, 0x9a, 0x8c, 0x89, 0x3d, - 0x2c, 0x03, 0xf3, 0x97, 0x11, 0xb1, 0x41, 0xaf, 0xd0, 0x9a, 0xfb, 0x35, 0xb6, 0xe1, 0x6a, 0x8c, - 0x5a, 0x6b, 0x5f, 0xd3, 0x44, 0xbd, 0xd8, 0x22, 0x1e, 0x0e, 0x53, 0x3a, 0xdc, 0x43, 0x01, 0x2a, - 0xfc, 0x23, 0xc8, 0xb8, 0x4d, 0x62, 0xc8, 0x00, 0xbd, 0x31, 0xcc, 0xa8, 0xfe, 0x3a, 0x56, 0x9a, - 0xc4, 0x08, 0xcb, 0x0b, 0x5b, 0x21, 0x8e, 0x08, 0x1f, 0x81, 0x29, 0x97, 0x07, 0x32, 0xbf, 0xcb, - 0x6c, 0xe9, 0xe6, 0x3b, 0x60, 0x8b, 0x44, 0x08, 0xf2, 0x4b, 0xac, 0x91, 0xc4, 0x55, 0xbf, 0x54, - 0x40, 0x61, 0x00, 0xa7, 0x4e, 0xf6, 0x70, 0x9b, 0xda, 0x0e, 0xbc, 0x07, 0xa6, 0xf9, 0xce, 0x83, - 0xa6, 0x74, 0xe0, 0x95, 0x43, 0xdd, 0x1b, 0x0f, 0x51, 0x3d, 0xcb, 0xb2, 0xaf, 0x22, 0xd8, 0x91, - 0x8f, 0x03, 0x77, 0xc0, 0x2c, 0xff, 0x79, 0xdb, 0x7e, 0x6c, 0x49, 0xbf, 0x8d, 0x03, 0x3a, 0xcf, - 0x8a, 0x7e, 0xc5, 0x07, 0x40, 0x21, 0x96, 0xfa, 0xaf, 0x34, 0x58, 0x1d, 0x60, 0xcf, 0xba, 0x6d, - 0x99, 0x94, 0xc5, 0x38, 0xbc, 0x13, 0x4b, 0xf3, 0xeb, 0x89, 0x34, 0xbf, 0x38, 0x8a, 0x3f, 0x92, - 0xf6, 0x9b, 0xc1, 0x05, 0xa5, 0x62, 0x58, 0xd2, 0xcd, 0x6f, 0x3b, 0x85, 0x3e, 0x83, 0x95, 0x16, - 0x20, 0xc5, 0x2f, 0x03, 0xb6, 0x01, 0xac, 0x63, 0xd7, 0xbb, 0xef, 0x60, 0xcb, 0x15, 0x92, 0x68, - 0x83, 0xc8, 0xab, 0xbf, 0x72, 0xb8, 0xa0, 0x65, 0x1c, 0xfa, 0x8a, 0xd4, 0x02, 0x6e, 0xf6, 0xa0, - 0xa1, 0x3e, 0x12, 0xe0, 0x4f, 0xc1, 0x94, 0x43, 0xb0, 0x6b, 0x5b, 0xb9, 0x0c, 0xb7, 0x22, 0x08, - 0x16, 0xc4, 0x77, 0x91, 0x3c, 0x85, 0x3f, 0x03, 0xd3, 0x0d, 0xe2, 0xba, 0xb8, 0x46, 0x72, 0x93, - 0x9c, 0x30, 0x28, 0xaf, 0x5b, 0x62, 0x1b, 0xf9, 0xe7, 0xea, 0x57, 0x0a, 0x38, 0x3f, 0xc0, 0x8f, - 0x9b, 0xd4, 0xf5, 0xe0, 0x5f, 0x7a, 0xb2, 0x52, 0x3b, 0x9c, 0x81, 0x8c, 0x9b, 0xe7, 0x64, 0x50, - 0x0f, 0xfc, 0x9d, 0x48, 0x46, 0xee, 0x80, 0x49, 0xea, 0x91, 0x86, 0x5f, 0x67, 0x4a, 0xe3, 0xa7, - 0x4d, 0x58, 0xc1, 
0x37, 0x18, 0x10, 0x12, 0x78, 0xea, 0x8b, 0xf4, 0x40, 0xb3, 0x58, 0xda, 0xc2, - 0x36, 0x58, 0xe0, 0x2b, 0xd9, 0x33, 0xc9, 0xae, 0x34, 0x6e, 0x58, 0x51, 0x18, 0x32, 0xa3, 0xe8, - 0x67, 0xa5, 0x16, 0x0b, 0x95, 0x18, 0x2a, 0x4a, 0x48, 0x81, 0xd7, 0x40, 0xb6, 0x41, 0x2d, 0x44, - 0x9a, 0x75, 0x6a, 0x60, 0x57, 0x36, 0xa1, 0xc5, 0x6e, 0xa7, 0x90, 0xdd, 0x0a, 0xb7, 0x51, 0x94, - 0x06, 0xfe, 0x1a, 0x64, 0x1b, 0xf8, 0x49, 0xc0, 0x22, 0x9a, 0xc5, 0x29, 0x29, 0x2f, 0xbb, 0x15, - 0x1e, 0xa1, 0x28, 0x1d, 0x2c, 0xb3, 0x18, 0x60, 0x6d, 0xd6, 0xcd, 0x65, 0xb8, 0x73, 0x7f, 0x32, - 0xb2, 0x21, 0xf3, 0xf2, 0x16, 0x09, 0x15, 0xce, 0x8d, 0x7c, 0x18, 0x68, 0x82, 0x99, 0xaa, 0x2c, - 0x35, 0x3c, 0xac, 0xb2, 0xa5, 0xdf, 0xbc, 0xc3, 0x7d, 0x49, 0x04, 0x7d, 0x8e, 0x85, 0x84, 0xbf, - 0x42, 0x01, 0xb2, 0xfa, 0x3c, 0x03, 0x2e, 0x0c, 0x2d, 0x91, 0xf0, 0x77, 0x00, 0xda, 0x55, 0x97, - 0x38, 0x6d, 0x62, 0xfe, 0x5e, 0x3c, 0x12, 0xd8, 0x4c, 0xc7, 0xee, 0x2f, 0xad, 0x9f, 0x65, 0xd9, - 0xb4, 0xdd, 0x73, 0x8a, 0xfa, 0x70, 0x40, 0x03, 0xcc, 0xb3, 0x1c, 0x13, 0x37, 0x46, 0xe5, 0xf8, - 0x38, 0x5e, 0x02, 0x2f, 0xb3, 0x69, 0x60, 0x33, 0x0a, 0x82, 0xe2, 0x98, 0x70, 0x0d, 0x2c, 0xca, - 0x49, 0x26, 0x71, 0x83, 0xe7, 0xa4, 0x9f, 0x17, 0xd7, 0xe3, 0xc7, 0x28, 0x49, 0xcf, 0x20, 0x4c, - 0xe2, 0x52, 0x87, 0x98, 0x01, 0x44, 0x26, 0x0e, 0x71, 0x3b, 0x7e, 0x8c, 0x92, 0xf4, 0xb0, 0x06, - 0x16, 0x24, 0xaa, 0xbc, 0xd5, 0xdc, 0x24, 0x8f, 0x89, 0xd1, 0x43, 0xa6, 0x6c, 0x4b, 0x41, 0x7c, - 0xaf, 0xc7, 0x60, 0x50, 0x02, 0x16, 0xda, 0x00, 0x18, 0x7e, 0xd1, 0x74, 0x73, 0x53, 0x5c, 0xc8, - 0xad, 0xf1, 0xa3, 0x24, 0x28, 0xbc, 0x61, 0x47, 0x0f, 0xb6, 0x5c, 0x14, 0x11, 0xa1, 0xfe, 0x57, - 0x01, 0x4b, 0xc9, 0x21, 0x35, 0x78, 0x0f, 0x28, 0x03, 0xdf, 0x03, 0x7f, 0x05, 0x33, 0x62, 0xe6, - 0xb1, 0x1d, 0x79, 0xed, 0xbf, 0x3a, 0x64, 0x59, 0xc3, 0x55, 0x52, 0xaf, 0x48, 0x56, 0x11, 0xc4, - 0xfe, 0x0a, 0x05, 0x90, 0xea, 0xb3, 0x0c, 0x00, 0x61, 0x4e, 0xc1, 0xeb, 0xb1, 0x3e, 0xb6, 0x9a, - 0xe8, 0x63, 0x4b, 0xd1, 0xc7, 0x45, 0xa4, 0x67, 0xdd, 0x03, 0x53, 0x36, 0x2f, 0x33, 0x52, 0xc3, - 0xab, 0x43, 0xfc, 0x18, 0xcc, 0x3b, 0x01, 0x90, 0x0e, 0x58, 0x63, 0x90, 0x75, 0x4a, 0x02, 0xc1, - 0x0d, 0x90, 0x69, 0xda, 0xa6, 0x3f, 0xa5, 0x0c, 0x1b, 0xeb, 0xca, 0xb6, 0xe9, 0xc6, 0xe0, 0x66, - 0x98, 0xc6, 0x6c, 0x17, 0x71, 0x08, 0x36, 0x25, 0xfa, 0x9f, 0x12, 0x78, 0x38, 0x66, 0x4b, 0xc5, - 0x21, 0x70, 0xfd, 0x1e, 0xec, 0xc2, 0x7b, 0xfe, 0x09, 0x0a, 0xe0, 0xe0, 0x3f, 0xc0, 0xb2, 0x91, - 0x7c, 0x00, 0xe7, 0xa6, 0x47, 0x0e, 0x56, 0x43, 0xbf, 0x0e, 0xe8, 0x67, 0xba, 0x9d, 0xc2, 0x72, - 0x0f, 0x09, 0xea, 0x95, 0xc4, 0x2c, 0x23, 0xf2, 0xdd, 0x24, 0xeb, 0xdc, 0x30, 0xcb, 0xfa, 0xbd, - 0x10, 0x85, 0x65, 0xfe, 0x09, 0x0a, 0xe0, 0xd4, 0xff, 0x65, 0xc0, 0x5c, 0xec, 0x2d, 0x76, 0xcc, - 0x91, 0x21, 0x92, 0xf9, 0xc8, 0x22, 0x43, 0xc0, 0x1d, 0x69, 0x64, 0x08, 0xc8, 0x63, 0x8a, 0x0c, - 0x21, 0xec, 0x98, 0x22, 0x23, 0x62, 0x59, 0x9f, 0xc8, 0xf8, 0x3c, 0xe5, 0x47, 0x86, 0x18, 0x16, - 0x0e, 0x17, 0x19, 0x82, 0x36, 0x12, 0x19, 0xdb, 0xd1, 0xe7, 0xed, 0x88, 0x59, 0x4d, 0xf3, 0xdd, - 0xaa, 0xdd, 0x6b, 0x61, 0xcb, 0xa3, 0xde, 0x81, 0x3e, 0xdb, 0xf3, 0x14, 0x36, 0xc1, 0x1c, 0x6e, - 0x13, 0x07, 0xd7, 0x08, 0xdf, 0x96, 0xf1, 0x31, 0x2e, 0xee, 0x12, 0x7b, 0x89, 0xae, 0x45, 0x70, - 0x50, 0x0c, 0x95, 0xb5, 0x74, 0xb9, 0x7e, 0xe0, 0x05, 0x4f, 0x5c, 0xd9, 0xe5, 0x78, 0x4b, 0x5f, - 0xeb, 0x39, 0x45, 0x7d, 0x38, 0xd4, 0xff, 0xa4, 0xc0, 0x72, 0xcf, 0xc7, 0x85, 0xd0, 0x29, 0xca, - 0x7b, 0x72, 0x4a, 0xea, 0x18, 0x9d, 0x92, 0x1e, 0xdb, 0x29, 0x1f, 0xa5, 0x00, 0xec, 0xed, 0x0f, - 0xf0, 0x80, 0x8f, 0x15, 0x86, 0x43, 0xab, 
0xc4, 0x14, 0xc7, 0x3f, 0x70, 0x06, 0x8e, 0x8e, 0x23, - 0x51, 0x58, 0x94, 0x94, 0x73, 0xf4, 0x1f, 0x59, 0xc3, 0x4f, 0x5a, 0xe9, 0x23, 0xfb, 0xa4, 0xa5, - 0x7e, 0x9a, 0xf4, 0xdb, 0x09, 0xfc, 0x7c, 0xd6, 0xef, 0x96, 0xd3, 0xc7, 0x73, 0xcb, 0xea, 0xc7, - 0x0a, 0x58, 0x4a, 0x8e, 0x11, 0x27, 0xe4, 0xdb, 0xe9, 0x67, 0x71, 0xd5, 0x4f, 0xe2, 0x77, 0xd3, - 0xe7, 0x0a, 0x38, 0x7d, 0x72, 0xfe, 0x26, 0x51, 0x3f, 0xec, 0x55, 0xf7, 0x04, 0xfc, 0xd9, 0xa1, - 0x5f, 0x7e, 0xf9, 0x26, 0x3f, 0xf1, 0xea, 0x4d, 0x7e, 0xe2, 0xf5, 0x9b, 0xfc, 0xc4, 0x3f, 0xbb, - 0x79, 0xe5, 0x65, 0x37, 0xaf, 0xbc, 0xea, 0xe6, 0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0x75, 0x37, 0xaf, - 0xfc, 0xfb, 0x9b, 0xfc, 0xc4, 0x9f, 0x53, 0xed, 0xd2, 0xf7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x72, - 0xba, 0x02, 0x95, 0x47, 0x1c, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/autoscaling/v2/generated.proto", fileDescriptor_4d5f2c8767749221) +} + +var fileDescriptor_4d5f2c8767749221 = []byte{ + // 1722 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x8f, 0x1b, 0x49, + 0x19, 0x9f, 0xb6, 0x3d, 0xaf, 0xf2, 0x3c, 0x2b, 0x2f, 0x67, 0xa2, 0xd8, 0xa3, 0x26, 0x90, 0x07, + 0xa4, 0x4d, 0x4c, 0x88, 0x22, 0x72, 0x40, 0xd3, 0x13, 0x20, 0xa3, 0xcc, 0x30, 0x4e, 0x39, 0xc9, + 0x00, 0x02, 0x94, 0x72, 0x77, 0x8d, 0xa7, 0x18, 0xbb, 0xdb, 0xea, 0x6e, 0x3b, 0x99, 0x48, 0x48, + 0x5c, 0xb8, 0x23, 0x50, 0x84, 0xf8, 0x1f, 0x22, 0x4e, 0xa0, 0x70, 0x00, 0x09, 0x69, 0xf7, 0x90, + 0xcb, 0x4a, 0x39, 0xec, 0x21, 0x27, 0x6b, 0xe3, 0x95, 0xf6, 0xb8, 0x7f, 0x40, 0x4e, 0xab, 0x7a, + 0xf4, 0xd3, 0xaf, 0x71, 0x76, 0x32, 0xd2, 0xdc, 0x5c, 0x55, 0xdf, 0xf7, 0xfb, 0x1e, 0xf5, 0xbd, + 0xaa, 0x0d, 0xae, 0xee, 0xdf, 0x76, 0x35, 0x6a, 0x17, 0x71, 0x93, 0x16, 0x71, 0xcb, 0xb3, 0x5d, + 0x03, 0xd7, 0xa9, 0x55, 0x2b, 0xb6, 0x4b, 0xc5, 0x1a, 0xb1, 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x4d, + 0xc7, 0xf6, 0x6c, 0x78, 0x5e, 0x90, 0x6a, 0xb8, 0x49, 0xb5, 0x08, 0xa9, 0xd6, 0x2e, 0xad, 0x5c, + 0xaf, 0x51, 0x6f, 0xaf, 0x55, 0xd5, 0x0c, 0xbb, 0x51, 0xac, 0xd9, 0x35, 0xbb, 0xc8, 0x39, 0xaa, + 0xad, 0x5d, 0xbe, 0xe2, 0x0b, 0xfe, 0x4b, 0x20, 0xad, 0xa8, 0x11, 0xa1, 0x86, 0xed, 0x90, 0x62, + 0xfb, 0x46, 0x52, 0xda, 0xca, 0xcd, 0x90, 0xa6, 0x81, 0x8d, 0x3d, 0x6a, 0x11, 0xe7, 0xa0, 0xd8, + 0xdc, 0xaf, 0x71, 0x26, 0x87, 0xb8, 0x76, 0xcb, 0x31, 0xc8, 0x58, 0x5c, 0x6e, 0xb1, 0x41, 0x3c, + 0xdc, 0x4f, 0x56, 0x71, 0x10, 0x97, 0xd3, 0xb2, 0x3c, 0xda, 0xe8, 0x15, 0x73, 0x6b, 0x14, 0x83, + 0x6b, 0xec, 0x91, 0x06, 0x4e, 0xf2, 0xa9, 0x5f, 0x29, 0xe0, 0xe2, 0xba, 0x6d, 0x79, 0x98, 0x71, + 0x20, 0x69, 0xc4, 0x16, 0xf1, 0x1c, 0x6a, 0x54, 0xf8, 0x6f, 0xb8, 0x0e, 0x32, 0x16, 0x6e, 0x90, + 0x9c, 0xb2, 0xaa, 0x5c, 0x99, 0xd5, 0x8b, 0xaf, 0x3b, 0x85, 0x89, 0x6e, 0xa7, 0x90, 0xf9, 0x25, + 0x6e, 0x90, 0xf7, 0x9d, 0x42, 0xa1, 0xd7, 0x71, 0x9a, 0x0f, 0xc3, 0x48, 0x10, 0x67, 0x86, 0xdb, + 0x60, 0xca, 0xc3, 0x4e, 0x8d, 0x78, 0xb9, 0xd4, 0xaa, 0x72, 0x25, 0x5b, 0xba, 0xac, 0x0d, 0xbc, + 0x3a, 0x4d, 0x48, 0x7f, 0xc8, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45, + 0x30, 0x6b, 0xf8, 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8, + 0x5f, 0x0f, 0x31, 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1a, 0x43, 0x77, 0xc0, 0xb4, 0xd1, 0x72, 0x1c, + 0x62, 0xf9, 0x96, 0xfe, 0x60, 0xa4, 0xa5, 0x8f, 0x71, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94, + 0x3a, 0xbd, 0x2e, 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x14, 0x70, 0x61, 0xdd, 0xb1, 0x5d, + 0xf7, 0x31, 0x71, 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x3f, 0x10, 0xc3, 0x43, 0x64, 0x97, 0x38, 0xc4, + 0x32, 0x08, 0x5c, 
0x05, 0x99, 0x7d, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0xfb, 0xd4, 0x32, + 0x11, 0x3f, 0x61, 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5, + 0x00, 0xa9, 0x15, 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xaf, 0x02, + 0x4e, 0xff, 0xec, 0x99, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6, + 0x2a, 0x65, 0x4b, 0xdf, 0x1f, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa5, 0xc4, 0x09, 0xe3, + 0x44, 0x9c, 0x20, 0x09, 0x75, 0xe4, 0x81, 0xa7, 0x7e, 0xda, 0xab, 0xbe, 0x08, 0x9f, 0x8f, 0xa2, + 0xfe, 0xc7, 0x0a, 0x27, 0xf5, 0x9f, 0x0a, 0x58, 0xba, 0x57, 0x5e, 0xab, 0x08, 0xee, 0xb2, 0x5d, + 0xa7, 0xc6, 0x01, 0xbc, 0x0d, 0x32, 0xde, 0x41, 0xd3, 0xcf, 0x80, 0x4b, 0xfe, 0x85, 0x3f, 0x3c, + 0x68, 0xb2, 0x0c, 0x38, 0x9d, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xef, 0x80, 0xc9, 0x36, 0x93, + 0xcb, 0xb5, 0x9c, 0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x07, 0xcc, 0x37, + 0x89, 0x43, 0x6d, 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x19, 0x49, 0x3c, + 0x5f, 0x8e, 0x1e, 0xa2, 0x38, 0xad, 0xfa, 0x8f, 0x14, 0x58, 0x0c, 0x15, 0x40, 0xad, 0x3a, 0x71, + 0xe1, 0xef, 0xc1, 0x8a, 0xeb, 0xe1, 0x2a, 0xad, 0xd3, 0xe7, 0xd8, 0xa3, 0xb6, 0xb5, 0x43, 0x2d, + 0xd3, 0x7e, 0x1a, 0x47, 0xcf, 0x77, 0x3b, 0x85, 0x95, 0xca, 0x40, 0x2a, 0x34, 0x04, 0x01, 0xde, + 0x07, 0x73, 0x2e, 0xa9, 0x13, 0xc3, 0x13, 0xf6, 0x4a, 0xbf, 0x5c, 0xee, 0x76, 0x0a, 0x73, 0x95, + 0xc8, 0xfe, 0xfb, 0x4e, 0xe1, 0x54, 0xcc, 0x31, 0xe2, 0x10, 0xc5, 0x98, 0xe1, 0xaf, 0xc1, 0x4c, + 0x93, 0xfd, 0xa2, 0xc4, 0xcd, 0xa5, 0x56, 0xd3, 0x23, 0x22, 0x24, 0xe9, 0x6b, 0x7d, 0x49, 0x7a, + 0x69, 0xa6, 0x2c, 0x41, 0x50, 0x00, 0xa7, 0xbe, 0x4a, 0x81, 0x73, 0xf7, 0x6c, 0x87, 0x3e, 0x67, + 0xc9, 0x5f, 0x2f, 0xdb, 0xe6, 0x9a, 0x04, 0x23, 0x0e, 0x7c, 0x02, 0x66, 0x58, 0x93, 0x31, 0xb1, + 0x87, 0x65, 0x60, 0xfe, 0x30, 0x22, 0x36, 0xe8, 0x15, 0x5a, 0x73, 0xbf, 0xc6, 0x36, 0x5c, 0x8d, + 0x51, 0x6b, 0xed, 0x1b, 0x9a, 0xa8, 0x17, 0x5b, 0xc4, 0xc3, 0x61, 0x4a, 0x87, 0x7b, 0x28, 0x40, + 0x85, 0xbf, 0x02, 0x19, 0xb7, 0x49, 0x0c, 0x19, 0xa0, 0xb7, 0x86, 0x19, 0xd5, 0x5f, 0xc7, 0x4a, + 0x93, 0x18, 0x61, 0x79, 0x61, 0x2b, 0xc4, 0x11, 0xe1, 0x13, 0x30, 0xe5, 0xf2, 0x40, 0xe6, 0x77, + 0x99, 0x2d, 0xdd, 0xfe, 0x00, 0x6c, 0x91, 0x08, 0x41, 0x7e, 0x89, 0x35, 0x92, 0xb8, 0xea, 0x67, + 0x0a, 0x28, 0x0c, 0xe0, 0xd4, 0xc9, 0x1e, 0x6e, 0x53, 0xdb, 0x81, 0x0f, 0xc0, 0x34, 0xdf, 0x79, + 0xd4, 0x94, 0x0e, 0xbc, 0x76, 0xa8, 0x7b, 0xe3, 0x21, 0xaa, 0x67, 0x59, 0xf6, 0x55, 0x04, 0x3b, + 0xf2, 0x71, 0xe0, 0x0e, 0x98, 0xe5, 0x3f, 0xef, 0xda, 0x4f, 0x2d, 0xe9, 0xb7, 0x71, 0x40, 0xe7, + 0x59, 0xd1, 0xaf, 0xf8, 0x00, 0x28, 0xc4, 0x52, 0xff, 0x9c, 0x06, 0xab, 0x03, 0xec, 0x59, 0xb7, + 0x2d, 0x93, 0xb2, 0x18, 0x87, 0xf7, 0x62, 0x69, 0x7e, 0x33, 0x91, 0xe6, 0x97, 0x46, 0xf1, 0x47, + 0xd2, 0x7e, 0x33, 0xb8, 0xa0, 0x54, 0x0c, 0x4b, 0xba, 0xf9, 0x7d, 0xa7, 0xd0, 0x67, 0xb0, 0xd2, + 0x02, 0xa4, 0xf8, 0x65, 0xc0, 0x36, 0x80, 0x75, 0xec, 0x7a, 0x0f, 0x1d, 0x6c, 0xb9, 0x42, 0x12, + 0x6d, 0x10, 0x79, 0xf5, 0xd7, 0x0e, 0x17, 0xb4, 0x8c, 0x43, 0x5f, 0x91, 0x5a, 0xc0, 0xcd, 0x1e, + 0x34, 0xd4, 0x47, 0x02, 0xfc, 0x1e, 0x98, 0x72, 0x08, 0x76, 0x6d, 0x2b, 0x97, 0xe1, 0x56, 0x04, + 0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x2a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b, + 0xe4, 0x84, 0x41, 0x79, 0xdd, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0x3f, 0x57, 0xc0, 0x85, 0x01, 0x7e, + 0xdc, 0xa4, 0xae, 0x07, 0x7f, 0xdb, 0x93, 0x95, 0xda, 0xe1, 0x0c, 0x64, 0xdc, 0x3c, 0x27, 0x83, + 0x7a, 0xe0, 0xef, 0x44, 0x32, 0x72, 0x07, 
0x4c, 0x52, 0x8f, 0x34, 0xfc, 0x3a, 0x53, 0x1a, 0x3f, + 0x6d, 0xc2, 0x0a, 0xbe, 0xc1, 0x80, 0x90, 0xc0, 0x53, 0x5f, 0xa5, 0x07, 0x9a, 0xc5, 0xd2, 0x16, + 0xb6, 0xc1, 0x02, 0x5f, 0xc9, 0x9e, 0x49, 0x76, 0xa5, 0x71, 0xc3, 0x8a, 0xc2, 0x90, 0x19, 0x45, + 0x3f, 0x2b, 0xb5, 0x58, 0xa8, 0xc4, 0x50, 0x51, 0x42, 0x0a, 0xbc, 0x01, 0xb2, 0x0d, 0x6a, 0x21, + 0xd2, 0xac, 0x53, 0x03, 0xbb, 0xb2, 0x09, 0x2d, 0x76, 0x3b, 0x85, 0xec, 0x56, 0xb8, 0x8d, 0xa2, + 0x34, 0xf0, 0xc7, 0x20, 0xdb, 0xc0, 0xcf, 0x02, 0x16, 0xd1, 0x2c, 0x4e, 0x49, 0x79, 0xd9, 0xad, + 0xf0, 0x08, 0x45, 0xe9, 0x60, 0x99, 0xc5, 0x00, 0x6b, 0xb3, 0x6e, 0x2e, 0xc3, 0x9d, 0xfb, 0xdd, + 0x91, 0x0d, 0x99, 0x97, 0xb7, 0x48, 0xa8, 0x70, 0x6e, 0xe4, 0xc3, 0x40, 0x13, 0xcc, 0x54, 0x65, + 0xa9, 0xe1, 0x61, 0x95, 0x2d, 0xfd, 0xe4, 0x03, 0xee, 0x4b, 0x22, 0xe8, 0x73, 0x2c, 0x24, 0xfc, + 0x15, 0x0a, 0x90, 0xd5, 0x97, 0x19, 0x70, 0x71, 0x68, 0x89, 0x84, 0x3f, 0x07, 0xd0, 0xae, 0xba, + 0xc4, 0x69, 0x13, 0xf3, 0x17, 0xe2, 0x91, 0xc0, 0x66, 0x3a, 0x76, 0x7f, 0x69, 0xfd, 0x2c, 0xcb, + 0xa6, 0xed, 0x9e, 0x53, 0xd4, 0x87, 0x03, 0x1a, 0x60, 0x9e, 0xe5, 0x98, 0xb8, 0x31, 0x2a, 0xc7, + 0xc7, 0xf1, 0x12, 0x78, 0x99, 0x4d, 0x03, 0x9b, 0x51, 0x10, 0x14, 0xc7, 0x84, 0x6b, 0x60, 0x51, + 0x4e, 0x32, 0x89, 0x1b, 0x3c, 0x27, 0xfd, 0xbc, 0xb8, 0x1e, 0x3f, 0x46, 0x49, 0x7a, 0x06, 0x61, + 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0xbb, 0xf1, 0x63, 0x94, 0xa4, 0x87, 0x35, + 0xb0, 0x20, 0x51, 0xe5, 0xad, 0xe6, 0x26, 0x79, 0x4c, 0x8c, 0x1e, 0x32, 0x65, 0x5b, 0x0a, 0xe2, + 0x7b, 0x3d, 0x06, 0x83, 0x12, 0xb0, 0xd0, 0x06, 0xc0, 0xf0, 0x8b, 0xa6, 0x9b, 0x9b, 0xe2, 0x42, + 0xee, 0x8c, 0x1f, 0x25, 0x41, 0xe1, 0x0d, 0x3b, 0x7a, 0xb0, 0xe5, 0xa2, 0x88, 0x08, 0xf5, 0x6f, + 0x0a, 0x58, 0x4a, 0x0e, 0xa9, 0xc1, 0x7b, 0x40, 0x19, 0xf8, 0x1e, 0xf8, 0x1d, 0x98, 0x11, 0x33, + 0x8f, 0xed, 0xc8, 0x6b, 0xff, 0xd1, 0x21, 0xcb, 0x1a, 0xae, 0x92, 0x7a, 0x45, 0xb2, 0x8a, 0x20, + 0xf6, 0x57, 0x28, 0x80, 0x54, 0x5f, 0x64, 0x00, 0x08, 0x73, 0x0a, 0xde, 0x8c, 0xf5, 0xb1, 0xd5, + 0x44, 0x1f, 0x5b, 0x8a, 0x3e, 0x2e, 0x22, 0x3d, 0xeb, 0x01, 0x98, 0xb2, 0x79, 0x99, 0x91, 0x1a, + 0x5e, 0x1f, 0xe2, 0xc7, 0x60, 0xde, 0x09, 0x80, 0x74, 0xc0, 0x1a, 0x83, 0xac, 0x53, 0x12, 0x08, + 0x6e, 0x80, 0x4c, 0xd3, 0x36, 0xfd, 0x29, 0x65, 0xd8, 0x58, 0x57, 0xb6, 0x4d, 0x37, 0x06, 0x37, + 0xc3, 0x34, 0x66, 0xbb, 0x88, 0x43, 0xb0, 0x29, 0xd1, 0xff, 0x94, 0xc0, 0xc3, 0x31, 0x5b, 0x2a, + 0x0e, 0x81, 0xeb, 0xf7, 0x60, 0x17, 0xde, 0xf3, 0x4f, 0x50, 0x00, 0x07, 0xff, 0x08, 0x96, 0x8d, + 0xe4, 0x03, 0x38, 0x37, 0x3d, 0x72, 0xb0, 0x1a, 0xfa, 0x75, 0x40, 0x3f, 0xd3, 0xed, 0x14, 0x96, + 0x7b, 0x48, 0x50, 0xaf, 0x24, 0x66, 0x19, 0x91, 0xef, 0x26, 0x59, 0xe7, 0x86, 0x59, 0xd6, 0xef, + 0x85, 0x28, 0x2c, 0xf3, 0x4f, 0x50, 0x00, 0xa7, 0xfe, 0x3d, 0x03, 0xe6, 0x62, 0x6f, 0xb1, 0x63, + 0x8e, 0x0c, 0x91, 0xcc, 0x47, 0x16, 0x19, 0x02, 0xee, 0x48, 0x23, 0x43, 0x40, 0x1e, 0x53, 0x64, + 0x08, 0x61, 0xc7, 0x14, 0x19, 0x11, 0xcb, 0xfa, 0x44, 0xc6, 0x27, 0x29, 0x3f, 0x32, 0xc4, 0xb0, + 0x70, 0xb8, 0xc8, 0x10, 0xb4, 0x91, 0xc8, 0xd8, 0x8e, 0x3e, 0x6f, 0x47, 0xcc, 0x6a, 0x9a, 0xef, + 0x56, 0xed, 0x41, 0x0b, 0x5b, 0x1e, 0xf5, 0x0e, 0xf4, 0xd9, 0x9e, 0xa7, 0xb0, 0x09, 0xe6, 0x70, + 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8, 0x4b, 0x74, 0x2d, 0x82, + 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x23, 0x2f, 0x78, 0xe2, 0xca, 0x2e, 0xc7, 0x5b, 0xfa, + 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x35, 0x05, 0x96, 0x7b, 0x3e, 0x2e, 0x84, 0x4e, 0x51, + 0x3e, 0x92, 0x53, 0x52, 0xc7, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 
0x77, 0x0a, 0xc0, 0xde, 0xfe, + 0x00, 0x0f, 0xf8, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x2d, 0x67, 0xe0, 0xe8, 0x38, + 0x12, 0x85, 0x45, 0x49, 0x39, 0x47, 0xff, 0x91, 0x35, 0xfc, 0xa4, 0x95, 0x3e, 0xb2, 0x4f, 0x5a, + 0xea, 0xff, 0x92, 0x7e, 0x3b, 0x81, 0x9f, 0xcf, 0xfa, 0xdd, 0x72, 0xfa, 0x78, 0x6e, 0x59, 0xfd, + 0x8f, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x09, 0xf9, 0x76, 0xfa, 0xff, 0xb8, 0xea, 0x27, 0xf1, 0xbb, + 0xe9, 0x4b, 0x05, 0x9c, 0x3e, 0x39, 0x7f, 0x93, 0xa8, 0xff, 0xea, 0x55, 0xf7, 0x04, 0xfc, 0xd9, + 0xa1, 0xff, 0xf4, 0xf5, 0xbb, 0xfc, 0xc4, 0x9b, 0x77, 0xf9, 0x89, 0xb7, 0xef, 0xf2, 0x13, 0x7f, + 0xea, 0xe6, 0x95, 0xd7, 0xdd, 0xbc, 0xf2, 0xa6, 0x9b, 0x57, 0xde, 0x76, 0xf3, 0xca, 0x17, 0xdd, + 0xbc, 0xf2, 0x97, 0x2f, 0xf3, 0x13, 0xbf, 0x39, 0x3f, 0xf0, 0x9f, 0xc2, 0x6f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xca, 0x8b, 0x47, 0xba, 0x45, 0x1c, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v2/generated.proto b/vendor/k8s.io/api/autoscaling/v2/generated.proto index e47d94a47..8f2ee5803 100644 --- a/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v2"; +option go_package = "k8s.io/api/autoscaling/v2"; // ContainerResourceMetricSource indicates how to scale on a resource metric known to // Kubernetes, as specified in requests and limits, describing each pod in the @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. 
+ // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -293,7 +293,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -393,12 +393,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -412,12 +412,12 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -495,7 +495,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2/types.go b/vendor/k8s.io/api/autoscaling/v2/types.go index 9931f6146..69a7b2701 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2/types.go @@ -26,6 +26,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource @@ -59,9 +60,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). The desired replica count is calculated multiplying the @@ -83,11 +86,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -105,11 +110,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -117,6 +124,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -125,6 +133,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -144,6 +153,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. // If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -171,7 +181,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -179,10 +189,12 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value Max is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +listType=atomic @@ -203,12 +215,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -249,8 +263,10 @@ const ( type ObjectMetricSource struct { // describedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +278,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +293,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +308,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +322,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +331,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +343,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -405,15 +430,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -432,11 +461,13 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). 
Such metrics are built in to @@ -444,6 +475,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -451,6 +483,7 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -465,8 +498,10 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + // DescribedObject specifies the descriptions of a object,such as kind,name apiVersion DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` } @@ -476,6 +511,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -486,8 +522,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -498,11 +535,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -511,6 +550,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -520,10 +560,12 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` + // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -532,6 +574,7 @@ type MetricValueStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { diff --git a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 05355a552..1941b1ef5 100644 --- a/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -288,7 +288,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..99ae74865 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index 28832c152..69567089b 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +// source: k8s.io/api/autoscaling/v2beta1/generated.proto package v2beta1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{0} + return fileDescriptor_ea74040359c1ed83, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{1} + return fileDescriptor_ea74040359c1ed83, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{2} + return fileDescriptor_ea74040359c1ed83, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{3} + return fileDescriptor_ea74040359c1ed83, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{4} + return fileDescriptor_ea74040359c1ed83, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{5} + return fileDescriptor_ea74040359c1ed83, []int{5} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 
+218,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{6} + return fileDescriptor_ea74040359c1ed83, []int{6} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{7} + return fileDescriptor_ea74040359c1ed83, []int{7} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{8} + return fileDescriptor_ea74040359c1ed83, []int{8} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{9} + return fileDescriptor_ea74040359c1ed83, []int{9} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{10} + return fileDescriptor_ea74040359c1ed83, []int{10} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{11} + return fileDescriptor_ea74040359c1ed83, []int{11} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{12} + return fileDescriptor_ea74040359c1ed83, []int{12} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return 
fileDescriptor_26c1bfc7a52d0478, []int{13} + return fileDescriptor_ea74040359c1ed83, []int{13} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{14} + return fileDescriptor_ea74040359c1ed83, []int{14} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{15} + return fileDescriptor_ea74040359c1ed83, []int{15} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{16} + return fileDescriptor_ea74040359c1ed83, []int{16} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{17} + return fileDescriptor_ea74040359c1ed83, []int{17} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -573,109 +573,108 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) -} - -var fileDescriptor_26c1bfc7a52d0478 = []byte{ - // 1562 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x6c, 0x1b, 0xc5, - 0x1b, 0x8f, 0xed, 0xcd, 0xeb, 0x73, 0x9a, 0xc7, 0xb4, 0xff, 0xd6, 0x4d, 0xff, 0xb5, 0xa3, 0x15, - 0x42, 0xa1, 0xa2, 0xbb, 0xad, 0x09, 0x0f, 0x09, 0x21, 0x11, 0x1b, 0x68, 0x2b, 0x92, 0xb6, 0x4c, - 0xd2, 0x0a, 0x41, 0x8b, 0x98, 0xac, 0xa7, 0xce, 0x12, 0x7b, 0xd7, 0xda, 0x19, 0x5b, 0x4d, 0x11, - 0x12, 0x42, 0xe2, 0xce, 0x05, 0xce, 0x20, 0x71, 0x45, 0x88, 0x0b, 0x9c, 0xb9, 0xf5, 0xd8, 0x63, - 0x2b, 0x90, 0x45, 0xcd, 0x81, 0x33, 0xd7, 0x9e, 0xd0, 0xcc, 0xce, 0xae, 0x77, 0xfd, 0x88, 0x1d, - 0x37, 0x0d, 0x0f, 0xf5, 0xe6, 0xdd, 0xf9, 0xbe, 0xdf, 0x37, 0xf3, 0xfb, 0x5e, 0xf3, 0xad, 0xe1, - 0xc2, 0xce, 0x2b, 0xcc, 0xb0, 0x5d, 0x73, 0xa7, 0xbe, 0x45, 0x3d, 0x87, 0x72, 0xca, 0xcc, 0x06, - 0x75, 0x4a, 0xae, 0x67, 0xaa, 0x05, 0x52, 0xb3, 0x4d, 0x52, 0xe7, 0x2e, 0xb3, 0x48, 0xc5, 0x76, - 0xca, 0x66, 0x23, 0xbf, 0x45, 0x39, 0x39, 0x6f, 0x96, 0xa9, 0x43, 0x3d, 0xc2, 0x69, 0xc9, 0xa8, - 0x79, 0x2e, 0x77, 0x51, 0xd6, 0x97, 0x37, 0x48, 0xcd, 0x36, 0x22, 0xf2, 0x86, 0x92, 0x5f, 0x3c, - 0x5b, 0xb6, 0xf9, 0x76, 0x7d, 0xcb, 0xb0, 0xdc, 0xaa, 0x59, 0x76, 0xcb, 0xae, 0x29, 0xd5, 0xb6, - 0xea, 0xb7, 0xe4, 0x93, 0x7c, 0x90, 0xbf, 0x7c, 0xb8, 0x45, 0x3d, 0x62, 0xde, 0x72, 0x3d, 0x6a, - 0x36, 0xba, 0x4c, 0x2e, 0xae, 0xb4, 
0x65, 0xaa, 0xc4, 0xda, 0xb6, 0x1d, 0xea, 0xed, 0x9a, 0xb5, - 0x9d, 0xb2, 0x54, 0xf2, 0x28, 0x73, 0xeb, 0x9e, 0x45, 0xf7, 0xa5, 0xc5, 0xcc, 0x2a, 0xe5, 0xa4, - 0x97, 0x2d, 0xb3, 0x9f, 0x96, 0x57, 0x77, 0xb8, 0x5d, 0xed, 0x36, 0xf3, 0xd2, 0x20, 0x05, 0x66, - 0x6d, 0xd3, 0x2a, 0xe9, 0xd4, 0xd3, 0xff, 0x48, 0xc2, 0xe9, 0xa2, 0xeb, 0x70, 0x22, 0x34, 0xb0, - 0x3a, 0xc4, 0x3a, 0xe5, 0x9e, 0x6d, 0x6d, 0xc8, 0xdf, 0xa8, 0x08, 0x9a, 0x43, 0xaa, 0x34, 0x93, - 0x58, 0x4a, 0x2c, 0x4f, 0x17, 0xcc, 0xbb, 0xcd, 0xdc, 0x58, 0xab, 0x99, 0xd3, 0x2e, 0x93, 0x2a, - 0x7d, 0xd4, 0xcc, 0xe5, 0xba, 0x89, 0x33, 0x02, 0x18, 0x21, 0x82, 0xa5, 0x32, 0x7a, 0x17, 0x32, - 0x9c, 0x78, 0x65, 0xca, 0x57, 0x1b, 0xd4, 0x23, 0x65, 0x7a, 0x8d, 0xdb, 0x15, 0xfb, 0x0e, 0xe1, - 0xb6, 0xeb, 0x64, 0x92, 0x4b, 0x89, 0xe5, 0xf1, 0xc2, 0xff, 0x5b, 0xcd, 0x5c, 0x66, 0xb3, 0x8f, - 0x0c, 0xee, 0xab, 0x8d, 0x1a, 0x80, 0x62, 0x6b, 0xd7, 0x49, 0xa5, 0x4e, 0x33, 0xa9, 0xa5, 0xc4, - 0x72, 0x3a, 0x6f, 0x18, 0xed, 0x28, 0x09, 0x59, 0x31, 0x6a, 0x3b, 0x65, 0x19, 0x36, 0x81, 0xcb, - 0x8c, 0x77, 0xea, 0xc4, 0xe1, 0x36, 0xdf, 0x2d, 0x1c, 0x6f, 0x35, 0x73, 0x68, 0xb3, 0x0b, 0x0d, - 0xf7, 0xb0, 0x80, 0x4c, 0x98, 0xb6, 0x02, 0xde, 0x32, 0x9a, 0xe4, 0x66, 0x41, 0x71, 0x33, 0xdd, - 0x26, 0xb4, 0x2d, 0xa3, 0xff, 0xb9, 0x07, 0xd3, 0x9c, 0xf0, 0x3a, 0x3b, 0x18, 0xa6, 0xdf, 0x87, - 0x93, 0x56, 0xdd, 0xf3, 0xa8, 0xd3, 0x9f, 0xea, 0xd3, 0xad, 0x66, 0xee, 0x64, 0xb1, 0x9f, 0x10, - 0xee, 0xaf, 0x8f, 0x3e, 0x81, 0xa3, 0xf1, 0xc5, 0xc7, 0x61, 0xfb, 0x94, 0x3a, 0xe0, 0xd1, 0x62, - 0x37, 0x24, 0xee, 0x65, 0x67, 0xff, 0x9c, 0x7f, 0x99, 0x80, 0x53, 0x45, 0xcf, 0x65, 0xec, 0x3a, - 0xf5, 0x98, 0xed, 0x3a, 0x57, 0xb6, 0x3e, 0xa2, 0x16, 0xc7, 0xf4, 0x16, 0xf5, 0xa8, 0x63, 0x51, - 0xb4, 0x04, 0xda, 0x8e, 0xed, 0x94, 0x14, 0xe3, 0x33, 0x01, 0xe3, 0x6f, 0xdb, 0x4e, 0x09, 0xcb, - 0x15, 0x21, 0x21, 0x7d, 0x92, 0x8c, 0x4b, 0x44, 0x08, 0xcf, 0x03, 0x90, 0x9a, 0xad, 0x0c, 0x48, - 0x2a, 0xa6, 0x0b, 0x48, 0xc9, 0xc1, 0xea, 0xd5, 0x4b, 0x6a, 0x05, 0x47, 0xa4, 0xf4, 0xaf, 0x52, - 0x70, 0xec, 0xcd, 0xdb, 0x9c, 0x7a, 0x0e, 0xa9, 0xc4, 0x92, 0x2d, 0x0f, 0x50, 0x95, 0xcf, 0x97, - 0xdb, 0x81, 0x10, 0x82, 0xad, 0x87, 0x2b, 0x38, 0x22, 0x85, 0x5c, 0x98, 0xf5, 0x9f, 0x36, 0x68, - 0x85, 0x5a, 0xdc, 0xf5, 0xe4, 0x66, 0xd3, 0xf9, 0x17, 0xf6, 0xf2, 0x07, 0x33, 0x44, 0xe9, 0x31, - 0x1a, 0xe7, 0x8d, 0x35, 0xb2, 0x45, 0x2b, 0x81, 0x6a, 0x01, 0xb5, 0x9a, 0xb9, 0xd9, 0xf5, 0x18, - 0x1c, 0xee, 0x80, 0x47, 0x04, 0xd2, 0x7e, 0x42, 0x3c, 0x8e, 0xf7, 0xe7, 0x5a, 0xcd, 0x5c, 0x7a, - 0xb3, 0x0d, 0x83, 0xa3, 0x98, 0x7d, 0xb2, 0x5a, 0x7b, 0xd2, 0x59, 0xad, 0x7f, 0xdd, 0xed, 0x18, - 0x3f, 0x37, 0xff, 0x15, 0x8e, 0xd9, 0x86, 0x19, 0x95, 0x36, 0x8f, 0xe3, 0x99, 0x63, 0xea, 0x58, - 0x33, 0xc5, 0x08, 0x16, 0x8e, 0x21, 0xa3, 0xdd, 0xde, 0x85, 0x60, 0x34, 0x07, 0x9d, 0xd8, 0x4f, - 0x11, 0xd0, 0x7f, 0x4e, 0xc2, 0x89, 0x8b, 0xae, 0x67, 0xdf, 0x11, 0x59, 0x5e, 0xb9, 0xea, 0x96, - 0x56, 0x55, 0xfb, 0xa7, 0x1e, 0xfa, 0x10, 0xa6, 0x04, 0x7b, 0x25, 0xc2, 0x89, 0xf4, 0x51, 0x3a, - 0x7f, 0x6e, 0x38, 0xae, 0xfd, 0xc2, 0xb0, 0x4e, 0x39, 0x69, 0x7b, 0xb5, 0xfd, 0x0e, 0x87, 0xa8, - 0xe8, 0x26, 0x68, 0xac, 0x46, 0x2d, 0xe5, 0xc9, 0x57, 0x8d, 0xbd, 0xaf, 0x21, 0x46, 0x9f, 0x8d, - 0x6e, 0xd4, 0xa8, 0xd5, 0x2e, 0x26, 0xe2, 0x09, 0x4b, 0x58, 0x44, 0x61, 0x82, 0xc9, 0x80, 0x53, - 0xbe, 0x7b, 0x6d, 0x54, 0x03, 0x12, 0xa4, 0x30, 0xab, 0x4c, 0x4c, 0xf8, 0xcf, 0x58, 0x81, 0xeb, - 0x9f, 0xa7, 0x60, 0xa9, 0x8f, 0x66, 0xd1, 0x75, 0x4a, 0xb6, 0x2c, 0xf6, 0x17, 0x41, 0xe3, 0xbb, - 0xb5, 0x20, 0xd8, 0x57, 0x82, 0xdd, 0x6e, 0xee, 0xd6, 0x44, 
0x3b, 0x7a, 0x66, 0x90, 0xbe, 0x90, - 0xc3, 0x12, 0x01, 0xad, 0x85, 0xa7, 0x4a, 0xc6, 0xb0, 0xd4, 0xb6, 0x1e, 0x35, 0x73, 0x3d, 0xee, - 0x5f, 0x46, 0x88, 0x14, 0xdf, 0xbc, 0xa8, 0x0d, 0x15, 0xc2, 0xf8, 0xa6, 0x47, 0x1c, 0xe6, 0x5b, - 0xb2, 0xab, 0x41, 0xac, 0x9f, 0x19, 0xce, 0xdd, 0x42, 0xa3, 0xb0, 0xa8, 0x76, 0x81, 0xd6, 0xba, - 0xd0, 0x70, 0x0f, 0x0b, 0xe8, 0x59, 0x98, 0xf0, 0x28, 0x61, 0xae, 0xa3, 0x5a, 0x4f, 0x48, 0x2e, - 0x96, 0x6f, 0xb1, 0x5a, 0x45, 0xcf, 0xc1, 0x64, 0x95, 0x32, 0x46, 0xca, 0x34, 0x33, 0x2e, 0x05, - 0xe7, 0x94, 0xe0, 0xe4, 0xba, 0xff, 0x1a, 0x07, 0xeb, 0xfa, 0x83, 0x04, 0x9c, 0xea, 0xc3, 0xe3, - 0x9a, 0xcd, 0x38, 0xba, 0xd1, 0x15, 0xcf, 0xc6, 0x90, 0xb5, 0xc3, 0x66, 0x7e, 0x34, 0xcf, 0x2b, - 0xdb, 0x53, 0xc1, 0x9b, 0x48, 0x2c, 0xdf, 0x80, 0x71, 0x9b, 0xd3, 0xaa, 0xf0, 0x4a, 0x6a, 0x39, - 0x9d, 0x7f, 0x79, 0xc4, 0x58, 0x2b, 0x1c, 0x51, 0x36, 0xc6, 0x2f, 0x09, 0x34, 0xec, 0x83, 0xea, - 0xbf, 0x24, 0xfb, 0x9e, 0x4d, 0x04, 0x3c, 0xfa, 0x18, 0x66, 0xe5, 0x93, 0x5f, 0x99, 0x31, 0xbd, - 0xa5, 0x4e, 0x38, 0x30, 0xa7, 0xf6, 0x68, 0xe8, 0x85, 0xe3, 0x6a, 0x2b, 0xb3, 0x1b, 0x31, 0x68, - 0xdc, 0x61, 0x0a, 0x9d, 0x87, 0x74, 0xd5, 0x76, 0x30, 0xad, 0x55, 0x6c, 0x8b, 0x30, 0x75, 0x2f, - 0x92, 0x2d, 0x69, 0xbd, 0xfd, 0x1a, 0x47, 0x65, 0xd0, 0x8b, 0x90, 0xae, 0x92, 0xdb, 0xa1, 0x4a, - 0x4a, 0xaa, 0x1c, 0x55, 0xf6, 0xd2, 0xeb, 0xed, 0x25, 0x1c, 0x95, 0x43, 0xd7, 0x44, 0x34, 0x88, - 0x2a, 0xcd, 0x32, 0x9a, 0xa4, 0xf9, 0xcc, 0xa0, 0xf3, 0xa9, 0x22, 0x2f, 0x4a, 0x44, 0x24, 0x72, - 0x24, 0x04, 0x0e, 0xb0, 0xf4, 0x1f, 0x35, 0x38, 0xbd, 0x67, 0xee, 0xa3, 0xb7, 0x00, 0xb9, 0x5b, - 0x8c, 0x7a, 0x0d, 0x5a, 0xba, 0xe0, 0x5f, 0xfa, 0xc5, 0xfd, 0x44, 0x70, 0x9c, 0xf2, 0x5b, 0xe2, - 0x95, 0xae, 0x55, 0xdc, 0x43, 0x03, 0x59, 0x70, 0x44, 0x24, 0x83, 0x4f, 0xa8, 0xad, 0xae, 0x42, - 0xfb, 0xcb, 0xb4, 0x85, 0x56, 0x33, 0x77, 0x64, 0x2d, 0x0a, 0x82, 0xe3, 0x98, 0x68, 0x15, 0xe6, - 0x54, 0xad, 0xef, 0x20, 0xf8, 0x84, 0x62, 0x60, 0xae, 0x18, 0x5f, 0xc6, 0x9d, 0xf2, 0x02, 0xa2, - 0x44, 0x99, 0xed, 0xd1, 0x52, 0x08, 0xa1, 0xc5, 0x21, 0xde, 0x88, 0x2f, 0xe3, 0x4e, 0x79, 0x54, - 0x81, 0x59, 0x85, 0xaa, 0xf8, 0xce, 0x8c, 0x4b, 0x97, 0x3d, 0x3f, 0xa4, 0xcb, 0xfc, 0xa2, 0x1b, - 0xc6, 0x60, 0x31, 0x86, 0x85, 0x3b, 0xb0, 0x11, 0x07, 0xb0, 0x82, 0x12, 0xc7, 0x32, 0x13, 0xd2, - 0xd2, 0xeb, 0x23, 0xe6, 0x60, 0x58, 0x2b, 0xdb, 0xed, 0x2b, 0x7c, 0xc5, 0x70, 0xc4, 0x8e, 0xfe, - 0xbd, 0x06, 0xd0, 0x8e, 0x30, 0xb4, 0x12, 0x2b, 0xf2, 0x4b, 0x1d, 0x45, 0x7e, 0x3e, 0x7a, 0x39, - 0x8d, 0x14, 0xf4, 0xeb, 0x30, 0xe1, 0xca, 0xcc, 0x53, 0xc1, 0x90, 0x1f, 0xb4, 0xed, 0xb0, 0x97, - 0x86, 0x68, 0x05, 0x10, 0xa5, 0x53, 0xe5, 0xaf, 0x42, 0x43, 0x97, 0x41, 0xab, 0xb9, 0xa5, 0xa0, - 0xf9, 0x9d, 0x1b, 0x84, 0x7a, 0xd5, 0x2d, 0xb1, 0x18, 0xe6, 0x94, 0xd8, 0xbb, 0x78, 0x8b, 0x25, - 0x0e, 0xfa, 0x00, 0xa6, 0x82, 0xeb, 0x86, 0xba, 0x9b, 0xac, 0x0c, 0xc2, 0xec, 0x35, 0x03, 0x17, - 0x66, 0x44, 0x05, 0x0d, 0x56, 0x70, 0x88, 0x89, 0x3e, 0x4b, 0xc0, 0x82, 0xd5, 0x39, 0xd3, 0x65, - 0x26, 0x87, 0x6b, 0xdd, 0x7b, 0x8e, 0xdd, 0x85, 0xff, 0xb5, 0x9a, 0xb9, 0x85, 0x2e, 0x11, 0xdc, - 0x6d, 0x4e, 0x1c, 0x92, 0xaa, 0x2b, 0xab, 0x6c, 0x38, 0x43, 0x1c, 0xb2, 0xd7, 0xec, 0xe1, 0x1f, - 0x32, 0x58, 0xc1, 0x21, 0xa6, 0xfe, 0x83, 0x06, 0x33, 0xb1, 0xbb, 0xf0, 0xdf, 0x11, 0x33, 0x7e, - 0x6a, 0x1d, 0x6c, 0xcc, 0xf8, 0x98, 0x07, 0x1f, 0x33, 0x3e, 0xee, 0xa1, 0xc6, 0x8c, 0x6f, 0xf2, - 0x30, 0x63, 0x26, 0x72, 0xc8, 0x1e, 0x31, 0xf3, 0x20, 0x05, 0xa8, 0x3b, 0xe7, 0x91, 0x05, 0x13, - 0xfe, 0xd0, 0x75, 0x10, 0xbd, 0x3e, 0xbc, 0x7f, 0xa9, 0xb6, 0xae, 0xa0, 0x3b, 0x46, 
0xb5, 0xe4, - 0x50, 0xa3, 0x1a, 0x3d, 0x88, 0x91, 0x36, 0xbc, 0x0c, 0xf4, 0x1d, 0x6b, 0x6f, 0xc2, 0x14, 0x0b, - 0x66, 0x41, 0x6d, 0xf4, 0x59, 0x50, 0xb2, 0x1e, 0x4e, 0x81, 0x21, 0x24, 0x2a, 0xc1, 0x0c, 0x89, - 0x8e, 0x63, 0xe3, 0x23, 0x1d, 0x63, 0x5e, 0xcc, 0x7e, 0xb1, 0x39, 0x2c, 0x86, 0xaa, 0xff, 0xda, - 0xe9, 0x5b, 0xbf, 0x2a, 0xfc, 0x63, 0x7d, 0x7b, 0x78, 0x53, 0xf1, 0x7f, 0xc2, 0xbd, 0xdf, 0x24, - 0x61, 0xbe, 0xb3, 0xb1, 0x8e, 0xf4, 0xf9, 0xe3, 0x4e, 0xcf, 0x6f, 0x38, 0xc9, 0x91, 0x36, 0x1d, - 0xce, 0x6a, 0x43, 0x7e, 0x9d, 0x8d, 0x7a, 0x22, 0x75, 0xe0, 0x9e, 0xd0, 0xbf, 0x8d, 0x73, 0x34, - 0xfa, 0x27, 0xa2, 0x3e, 0x1f, 0x54, 0x93, 0x87, 0xf4, 0x41, 0xf5, 0x09, 0xd3, 0xf4, 0x5d, 0x12, - 0x8e, 0x3d, 0xfd, 0x4f, 0x61, 0xf8, 0xaf, 0x8f, 0x3f, 0x75, 0xf3, 0xf5, 0xf4, 0x9f, 0x81, 0x61, - 0x02, 0xb9, 0x70, 0xf6, 0xee, 0xc3, 0xec, 0xd8, 0xbd, 0x87, 0xd9, 0xb1, 0xfb, 0x0f, 0xb3, 0x63, - 0x9f, 0xb6, 0xb2, 0x89, 0xbb, 0xad, 0x6c, 0xe2, 0x5e, 0x2b, 0x9b, 0xb8, 0xdf, 0xca, 0x26, 0x7e, - 0x6b, 0x65, 0x13, 0x5f, 0xfc, 0x9e, 0x1d, 0x7b, 0x6f, 0x52, 0xb5, 0x9e, 0xbf, 0x02, 0x00, 0x00, - 0xff, 0xff, 0x4d, 0x4c, 0xa8, 0x42, 0x87, 0x1c, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_ea74040359c1ed83) +} + +var fileDescriptor_ea74040359c1ed83 = []byte{ + // 1549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0x1b, 0xc5, + 0x17, 0x8f, 0xed, 0x4d, 0x9a, 0x3c, 0xa7, 0xf9, 0x98, 0xf6, 0xdf, 0xba, 0xe9, 0xbf, 0x76, 0xb4, + 0xfa, 0xeb, 0xaf, 0x50, 0xc1, 0xba, 0x35, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x2e, 0xd0, 0x8a, 0xb8, + 0x2d, 0x93, 0xb4, 0x42, 0xd0, 0x22, 0x26, 0xeb, 0xa9, 0xb3, 0xc4, 0xde, 0xb5, 0x76, 0xc6, 0x51, + 0x53, 0x84, 0x84, 0x90, 0xb8, 0x73, 0x81, 0x33, 0x48, 0x5c, 0x11, 0xe2, 0x02, 0x67, 0x6e, 0x3d, + 0xf6, 0xd8, 0x0a, 0x64, 0x51, 0x73, 0xe0, 0xcc, 0xb5, 0x27, 0x34, 0xb3, 0xb3, 0xeb, 0x5d, 0xdb, + 0x6b, 0x3b, 0x6e, 0x1a, 0x3e, 0xd4, 0x9b, 0x77, 0xe7, 0xbd, 0xdf, 0x9b, 0xf9, 0xbd, 0xaf, 0x79, + 0x6b, 0x30, 0x76, 0x5e, 0x66, 0x86, 0xe5, 0xe4, 0x49, 0xc3, 0xca, 0x93, 0x26, 0x77, 0x98, 0x49, + 0x6a, 0x96, 0x5d, 0xcd, 0xef, 0x16, 0xb6, 0x28, 0x27, 0xe7, 0xf3, 0x55, 0x6a, 0x53, 0x97, 0x70, + 0x5a, 0x31, 0x1a, 0xae, 0xc3, 0x1d, 0x94, 0xf5, 0xe4, 0x0d, 0xd2, 0xb0, 0x8c, 0x90, 0xbc, 0xa1, + 0xe4, 0x97, 0x9e, 0xab, 0x5a, 0x7c, 0xbb, 0xb9, 0x65, 0x98, 0x4e, 0x3d, 0x5f, 0x75, 0xaa, 0x4e, + 0x5e, 0xaa, 0x6d, 0x35, 0x6f, 0xc9, 0x27, 0xf9, 0x20, 0x7f, 0x79, 0x70, 0x4b, 0x7a, 0xc8, 0xbc, + 0xe9, 0xb8, 0x34, 0xbf, 0xdb, 0x63, 0x72, 0x69, 0xb5, 0x23, 0x53, 0x27, 0xe6, 0xb6, 0x65, 0x53, + 0x77, 0x2f, 0xdf, 0xd8, 0xa9, 0x4a, 0x25, 0x97, 0x32, 0xa7, 0xe9, 0x9a, 0x74, 0x5f, 0x5a, 0x2c, + 0x5f, 0xa7, 0x9c, 0xf4, 0xb3, 0x95, 0x8f, 0xd3, 0x72, 0x9b, 0x36, 0xb7, 0xea, 0xbd, 0x66, 0x5e, + 0x1c, 0xa6, 0xc0, 0xcc, 0x6d, 0x5a, 0x27, 0xdd, 0x7a, 0xfa, 0xef, 0x49, 0x38, 0x53, 0x72, 0x6c, + 0x4e, 0x84, 0x06, 0x56, 0x87, 0x28, 0x53, 0xee, 0x5a, 0xe6, 0x86, 0xfc, 0x8d, 0x4a, 0xa0, 0xd9, + 0xa4, 0x4e, 0x33, 0x89, 0xe5, 0xc4, 0xca, 0x4c, 0x31, 0x7f, 0xb7, 0x95, 0x9b, 0x68, 0xb7, 0x72, + 0xda, 0x65, 0x52, 0xa7, 0x8f, 0x5a, 0xb9, 0x5c, 0x2f, 0x71, 0x86, 0x0f, 0x23, 0x44, 0xb0, 0x54, + 0x46, 0xef, 0x40, 0x86, 0x13, 0xb7, 0x4a, 0xf9, 0xda, 0x2e, 0x75, 0x49, 0x95, 0x5e, 0xe3, 0x56, + 0xcd, 0xba, 0x43, 0xb8, 0xe5, 0xd8, 0x99, 0xe4, 0x72, 0x62, 0x65, 0xb2, 0xf8, 0xdf, 0x76, 0x2b, + 0x97, 0xd9, 0x8c, 0x91, 0xc1, 0xb1, 0xda, 0x68, 0x17, 0x50, 0x64, 0xed, 0x3a, 0xa9, 0x35, 0x69, + 0x26, 0xb5, 0x9c, 0x58, 0x49, 0x17, 
0x0c, 0xa3, 0x13, 0x25, 0x01, 0x2b, 0x46, 0x63, 0xa7, 0x2a, + 0xc3, 0xc6, 0x77, 0x99, 0xf1, 0x76, 0x93, 0xd8, 0xdc, 0xe2, 0x7b, 0xc5, 0x13, 0xed, 0x56, 0x0e, + 0x6d, 0xf6, 0xa0, 0xe1, 0x3e, 0x16, 0x50, 0x1e, 0x66, 0x4c, 0x9f, 0xb7, 0x8c, 0x26, 0xb9, 0x59, + 0x54, 0xdc, 0xcc, 0x74, 0x08, 0xed, 0xc8, 0xe8, 0x7f, 0x0c, 0x60, 0x9a, 0x13, 0xde, 0x64, 0x07, + 0xc3, 0xf4, 0x7b, 0x70, 0xca, 0x6c, 0xba, 0x2e, 0xb5, 0xe3, 0xa9, 0x3e, 0xd3, 0x6e, 0xe5, 0x4e, + 0x95, 0xe2, 0x84, 0x70, 0xbc, 0x3e, 0xfa, 0x18, 0x8e, 0x45, 0x17, 0x1f, 0x87, 0xed, 0xd3, 0xea, + 0x80, 0xc7, 0x4a, 0xbd, 0x90, 0xb8, 0x9f, 0x9d, 0xfd, 0x73, 0xfe, 0x45, 0x02, 0x4e, 0x97, 0x5c, + 0x87, 0xb1, 0xeb, 0xd4, 0x65, 0x96, 0x63, 0x5f, 0xd9, 0xfa, 0x90, 0x9a, 0x1c, 0xd3, 0x5b, 0xd4, + 0xa5, 0xb6, 0x49, 0xd1, 0x32, 0x68, 0x3b, 0x96, 0x5d, 0x51, 0x8c, 0xcf, 0xfa, 0x8c, 0xbf, 0x65, + 0xd9, 0x15, 0x2c, 0x57, 0x84, 0x84, 0xf4, 0x49, 0x32, 0x2a, 0x11, 0x22, 0xbc, 0x00, 0x40, 0x1a, + 0x96, 0x32, 0x20, 0xa9, 0x98, 0x29, 0x22, 0x25, 0x07, 0x6b, 0x57, 0x2f, 0xa9, 0x15, 0x1c, 0x92, + 0xd2, 0xbf, 0x4c, 0xc1, 0xf1, 0xd7, 0x6f, 0x73, 0xea, 0xda, 0xa4, 0x16, 0x49, 0xb6, 0x02, 0x40, + 0x5d, 0x3e, 0x5f, 0xee, 0x04, 0x42, 0x00, 0x56, 0x0e, 0x56, 0x70, 0x48, 0x0a, 0x39, 0x30, 0xe7, + 0x3d, 0x6d, 0xd0, 0x1a, 0x35, 0xb9, 0xe3, 0xca, 0xcd, 0xa6, 0x0b, 0xcf, 0x0f, 0xf2, 0x07, 0x33, + 0x44, 0xe9, 0x31, 0x76, 0xcf, 0x1b, 0xeb, 0x64, 0x8b, 0xd6, 0x7c, 0xd5, 0x22, 0x6a, 0xb7, 0x72, + 0x73, 0xe5, 0x08, 0x1c, 0xee, 0x82, 0x47, 0x04, 0xd2, 0x5e, 0x42, 0x3c, 0x8e, 0xf7, 0xe7, 0xdb, + 0xad, 0x5c, 0x7a, 0xb3, 0x03, 0x83, 0xc3, 0x98, 0x31, 0x59, 0xad, 0x3d, 0xe9, 0xac, 0xd6, 0xbf, + 0xea, 0x75, 0x8c, 0x97, 0x9b, 0xff, 0x08, 0xc7, 0x6c, 0xc3, 0xac, 0x4a, 0x9b, 0xc7, 0xf1, 0xcc, + 0x71, 0x75, 0xac, 0xd9, 0x52, 0x08, 0x0b, 0x47, 0x90, 0xd1, 0x5e, 0xff, 0x42, 0x30, 0x9e, 0x83, + 0x4e, 0xee, 0xa7, 0x08, 0xe8, 0x3f, 0x25, 0xe1, 0xe4, 0x45, 0xc7, 0xb5, 0xee, 0x88, 0x2c, 0xaf, + 0x5d, 0x75, 0x2a, 0x6b, 0xaa, 0xfd, 0x53, 0x17, 0x7d, 0x00, 0xd3, 0x82, 0xbd, 0x0a, 0xe1, 0x44, + 0xfa, 0x28, 0x5d, 0x38, 0x37, 0x1a, 0xd7, 0x5e, 0x61, 0x28, 0x53, 0x4e, 0x3a, 0x5e, 0xed, 0xbc, + 0xc3, 0x01, 0x2a, 0xba, 0x09, 0x1a, 0x6b, 0x50, 0x53, 0x79, 0xf2, 0x15, 0x63, 0xf0, 0x35, 0xc4, + 0x88, 0xd9, 0xe8, 0x46, 0x83, 0x9a, 0x9d, 0x62, 0x22, 0x9e, 0xb0, 0x84, 0x45, 0x14, 0xa6, 0x98, + 0x0c, 0x38, 0xe5, 0xbb, 0x57, 0xc7, 0x35, 0x20, 0x41, 0x8a, 0x73, 0xca, 0xc4, 0x94, 0xf7, 0x8c, + 0x15, 0xb8, 0xfe, 0x59, 0x0a, 0x96, 0x63, 0x34, 0x4b, 0x8e, 0x5d, 0xb1, 0x64, 0xb1, 0xbf, 0x08, + 0x1a, 0xdf, 0x6b, 0xf8, 0xc1, 0xbe, 0xea, 0xef, 0x76, 0x73, 0xaf, 0x21, 0xda, 0xd1, 0xff, 0x86, + 0xe9, 0x0b, 0x39, 0x2c, 0x11, 0xd0, 0x7a, 0x70, 0xaa, 0x64, 0x04, 0x4b, 0x6d, 0xeb, 0x51, 0x2b, + 0xd7, 0xe7, 0xfe, 0x65, 0x04, 0x48, 0xd1, 0xcd, 0x8b, 0xda, 0x50, 0x23, 0x8c, 0x6f, 0xba, 0xc4, + 0x66, 0x9e, 0x25, 0xab, 0xee, 0xc7, 0xfa, 0xd9, 0xd1, 0xdc, 0x2d, 0x34, 0x8a, 0x4b, 0x6a, 0x17, + 0x68, 0xbd, 0x07, 0x0d, 0xf7, 0xb1, 0x80, 0xfe, 0x0f, 0x53, 0x2e, 0x25, 0xcc, 0xb1, 0x55, 0xeb, + 0x09, 0xc8, 0xc5, 0xf2, 0x2d, 0x56, 0xab, 0xe8, 0x19, 0x38, 0x52, 0xa7, 0x8c, 0x91, 0x2a, 0xcd, + 0x4c, 0x4a, 0xc1, 0x79, 0x25, 0x78, 0xa4, 0xec, 0xbd, 0xc6, 0xfe, 0xba, 0xfe, 0x20, 0x01, 0xa7, + 0x63, 0x78, 0x5c, 0xb7, 0x18, 0x47, 0x37, 0x7a, 0xe2, 0xd9, 0x18, 0xb1, 0x76, 0x58, 0xcc, 0x8b, + 0xe6, 0x05, 0x65, 0x7b, 0xda, 0x7f, 0x13, 0x8a, 0xe5, 0x1b, 0x30, 0x69, 0x71, 0x5a, 0x17, 0x5e, + 0x49, 0xad, 0xa4, 0x0b, 0x2f, 0x8d, 0x19, 0x6b, 0xc5, 0xa3, 0xca, 0xc6, 0xe4, 0x25, 0x81, 0x86, + 0x3d, 0x50, 0xfd, 0xe7, 0x64, 0xec, 0xd9, 0x44, 0xc0, 0xa3, 
0x8f, 0x60, 0x4e, 0x3e, 0x79, 0x95, + 0x19, 0xd3, 0x5b, 0xea, 0x84, 0x43, 0x73, 0x6a, 0x40, 0x43, 0x2f, 0x9e, 0x50, 0x5b, 0x99, 0xdb, + 0x88, 0x40, 0xe3, 0x2e, 0x53, 0xe8, 0x3c, 0xa4, 0xeb, 0x96, 0x8d, 0x69, 0xa3, 0x66, 0x99, 0x84, + 0xa9, 0x7b, 0x91, 0x6c, 0x49, 0xe5, 0xce, 0x6b, 0x1c, 0x96, 0x41, 0x2f, 0x40, 0xba, 0x4e, 0x6e, + 0x07, 0x2a, 0x29, 0xa9, 0x72, 0x4c, 0xd9, 0x4b, 0x97, 0x3b, 0x4b, 0x38, 0x2c, 0x87, 0xae, 0x89, + 0x68, 0x10, 0x55, 0x9a, 0x65, 0x34, 0x49, 0xf3, 0xd9, 0x61, 0xe7, 0x53, 0x45, 0x5e, 0x94, 0x88, + 0x50, 0xe4, 0x48, 0x08, 0xec, 0x63, 0xe9, 0x3f, 0x68, 0x70, 0x66, 0x60, 0xee, 0xa3, 0x37, 0x00, + 0x39, 0x5b, 0x8c, 0xba, 0xbb, 0xb4, 0xf2, 0xa6, 0x77, 0xe9, 0x17, 0xf7, 0x13, 0xc1, 0x71, 0xca, + 0x6b, 0x89, 0x57, 0x7a, 0x56, 0x71, 0x1f, 0x0d, 0x64, 0xc2, 0x51, 0x91, 0x0c, 0x1e, 0xa1, 0x96, + 0xba, 0x0a, 0xed, 0x2f, 0xd3, 0x16, 0xdb, 0xad, 0xdc, 0xd1, 0xf5, 0x30, 0x08, 0x8e, 0x62, 0xa2, + 0x35, 0x98, 0x57, 0xb5, 0xbe, 0x8b, 0xe0, 0x93, 0x8a, 0x81, 0xf9, 0x52, 0x74, 0x19, 0x77, 0xcb, + 0x0b, 0x88, 0x0a, 0x65, 0x96, 0x4b, 0x2b, 0x01, 0x84, 0x16, 0x85, 0xb8, 0x10, 0x5d, 0xc6, 0xdd, + 0xf2, 0xa8, 0x06, 0x73, 0x0a, 0x55, 0xf1, 0x9d, 0x99, 0x94, 0x2e, 0x7b, 0x76, 0x44, 0x97, 0x79, + 0x45, 0x37, 0x88, 0xc1, 0x52, 0x04, 0x0b, 0x77, 0x61, 0x23, 0x0e, 0x60, 0xfa, 0x25, 0x8e, 0x65, + 0xa6, 0xa4, 0xa5, 0xd7, 0xc6, 0xcc, 0xc1, 0xa0, 0x56, 0x76, 0xda, 0x57, 0xf0, 0x8a, 0xe1, 0x90, + 0x1d, 0xfd, 0x3b, 0x0d, 0xa0, 0x13, 0x61, 0x68, 0x35, 0x52, 0xe4, 0x97, 0xbb, 0x8a, 0xfc, 0x42, + 0xf8, 0x72, 0x1a, 0x2a, 0xe8, 0xd7, 0x61, 0xca, 0x91, 0x99, 0xa7, 0x82, 0xa1, 0x30, 0x6c, 0xdb, + 0x41, 0x2f, 0x0d, 0xd0, 0x8a, 0x20, 0x4a, 0xa7, 0xca, 0x5f, 0x85, 0x86, 0x2e, 0x83, 0xd6, 0x70, + 0x2a, 0x7e, 0xf3, 0x3b, 0x37, 0x0c, 0xf5, 0xaa, 0x53, 0x61, 0x11, 0xcc, 0x69, 0xb1, 0x77, 0xf1, + 0x16, 0x4b, 0x1c, 0xf4, 0x3e, 0x4c, 0xfb, 0xd7, 0x0d, 0x75, 0x37, 0x59, 0x1d, 0x86, 0xd9, 0x6f, + 0x06, 0x2e, 0xce, 0x8a, 0x0a, 0xea, 0xaf, 0xe0, 0x00, 0x13, 0x7d, 0x9a, 0x80, 0x45, 0xb3, 0x7b, + 0xa6, 0xcb, 0x1c, 0x19, 0xad, 0x75, 0x0f, 0x1c, 0xbb, 0x8b, 0xff, 0x69, 0xb7, 0x72, 0x8b, 0x3d, + 0x22, 0xb8, 0xd7, 0x9c, 0x38, 0x24, 0x55, 0x57, 0x56, 0xd9, 0x70, 0x46, 0x38, 0x64, 0xbf, 0xd9, + 0xc3, 0x3b, 0xa4, 0xbf, 0x82, 0x03, 0x4c, 0xfd, 0x7b, 0x0d, 0x66, 0x23, 0x77, 0xe1, 0xbf, 0x22, + 0x66, 0xbc, 0xd4, 0x3a, 0xd8, 0x98, 0xf1, 0x30, 0x0f, 0x3e, 0x66, 0x3c, 0xdc, 0x43, 0x8d, 0x19, + 0xcf, 0xe4, 0x61, 0xc6, 0x4c, 0xe8, 0x90, 0x7d, 0x62, 0xe6, 0x41, 0x0a, 0x50, 0x6f, 0xce, 0x23, + 0x13, 0xa6, 0xbc, 0xa1, 0xeb, 0x20, 0x7a, 0x7d, 0x70, 0xff, 0x52, 0x6d, 0x5d, 0x41, 0x77, 0x8d, + 0x6a, 0xc9, 0x91, 0x46, 0x35, 0x7a, 0x10, 0x23, 0x6d, 0x70, 0x19, 0x88, 0x1d, 0x6b, 0x6f, 0xc2, + 0x34, 0xf3, 0x67, 0x41, 0x6d, 0xfc, 0x59, 0x50, 0xb2, 0x1e, 0x4c, 0x81, 0x01, 0x24, 0xaa, 0xc0, + 0x2c, 0x09, 0x8f, 0x63, 0x93, 0x63, 0x1d, 0x63, 0x41, 0xcc, 0x7e, 0x91, 0x39, 0x2c, 0x82, 0xaa, + 0xff, 0xd2, 0xed, 0x5b, 0xaf, 0x2a, 0xfc, 0x6d, 0x7d, 0x7b, 0x78, 0x53, 0xf1, 0xbf, 0xc2, 0xbd, + 0x5f, 0x27, 0x61, 0xa1, 0xbb, 0xb1, 0x8e, 0xf5, 0xf9, 0xe3, 0x4e, 0xdf, 0x6f, 0x38, 0xc9, 0xb1, + 0x36, 0x1d, 0xcc, 0x6a, 0x23, 0x7e, 0x9d, 0x0d, 0x7b, 0x22, 0x75, 0xe0, 0x9e, 0xd0, 0xbf, 0x89, + 0x72, 0x34, 0xfe, 0x27, 0xa2, 0x98, 0x0f, 0xaa, 0xc9, 0x43, 0xfa, 0xa0, 0xfa, 0x84, 0x69, 0xfa, + 0x36, 0x09, 0xc7, 0x9f, 0xfe, 0xa7, 0x30, 0xfa, 0xd7, 0xc7, 0x1f, 0x7b, 0xf9, 0x7a, 0xfa, 0xcf, + 0xc0, 0x28, 0x81, 0x5c, 0xbc, 0x70, 0xf7, 0x61, 0x76, 0xe2, 0xde, 0xc3, 0xec, 0xc4, 0xfd, 0x87, + 0xd9, 0x89, 0x4f, 0xda, 0xd9, 0xc4, 0xdd, 0x76, 0x36, 0x71, 0xaf, 0x9d, 0x4d, 0xdc, 
0x6f, 0x67, + 0x13, 0xbf, 0xb6, 0xb3, 0x89, 0xcf, 0x7f, 0xcb, 0x4e, 0xbc, 0x9b, 0x1d, 0xfc, 0x27, 0xe3, 0x9f, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x05, 0xaa, 0x18, 0x85, 0x1c, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 27f9ab45c..232a59815 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v2beta1"; +option go_package = "k8s.io/api/autoscaling/v2beta1"; // ContainerResourceMetricSource indicates how to scale on a resource metric known to // Kubernetes, as specified in requests and limits, describing each pod in the @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -86,10 +86,10 @@ message ContainerResourceMetricStatus { // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; // API version of the referent @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,14 +131,14 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // HorizontalPodAutoscaler is the configuration for a horizontal pod @@ -148,7 +148,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -172,7 +172,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -188,7 +188,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -220,6 +220,7 @@ message HorizontalPodAutoscalerSpec { // increased, and vice-versa. See the individual metric source types for // more information about how each type of metric must respond. // +optional + // +listType=atomic repeated MetricSpec metrics = 4; } @@ -232,7 +233,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -244,11 +245,13 @@ message HorizontalPodAutoscalerStatus { // currentMetrics is the last read state of the metrics used by this autoscaler. 
// +optional + // +listType=atomic repeated MetricStatus currentMetrics = 5; // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic repeated HorizontalPodAutoscalerCondition conditions = 6; } @@ -352,18 +355,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -376,18 +379,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -400,13 +403,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -417,13 +420,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -447,7 +450,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -471,6 +474,6 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 22bb7699e..193cc4354 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -24,9 +24,9 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent // +optional @@ -56,6 +56,7 @@ type HorizontalPodAutoscalerSpec struct { // increased, and vice-versa. See the individual metric source types for // more information about how each type of metric must respond. 
// +optional + // +listType=atomic Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` } @@ -260,11 +261,13 @@ type HorizontalPodAutoscalerStatus struct { // currentMetrics is the last read state of the metrics used by this autoscaler. // +optional + // +listType=atomic CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index 08a54621f..d656ee416 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -53,8 +53,8 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "apiVersion": "API version of the referent", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index cece3c877..741979505 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +// source: k8s.io/api/autoscaling/v2beta2/generated.proto package v2beta2 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} } func (*ContainerResourceMetricSource) ProtoMessage() {} func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{0} + return fileDescriptor_1076ab1fac987148, []int{0} } func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} } func (*ContainerResourceMetricStatus) ProtoMessage() {} func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{1} + return fileDescriptor_1076ab1fac987148, []int{1} } func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{2} + return fileDescriptor_1076ab1fac987148, []int{2} } func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } func (*ExternalMetricSource) ProtoMessage() {} func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{3} + return fileDescriptor_1076ab1fac987148, []int{3} } func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } func (*ExternalMetricStatus) ProtoMessage() {} func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{4} + return fileDescriptor_1076ab1fac987148, []int{4} } func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } func (*HPAScalingPolicy) ProtoMessage() {} func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{5} + return fileDescriptor_1076ab1fac987148, []int{5} } func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } func (*HPAScalingRules) ProtoMessage() {} func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{6} + return fileDescriptor_1076ab1fac987148, []int{6} } func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -246,7 +246,7 @@ var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo func (m 
*HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } func (*HorizontalPodAutoscaler) ProtoMessage() {} func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{7} + return fileDescriptor_1076ab1fac987148, []int{7} } func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{8} + return fileDescriptor_1076ab1fac987148, []int{8} } func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -302,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{9} + return fileDescriptor_1076ab1fac987148, []int{9} } func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{10} + return fileDescriptor_1076ab1fac987148, []int{10} } func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -358,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{11} + return fileDescriptor_1076ab1fac987148, []int{11} } func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -386,7 +386,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{12} + return fileDescriptor_1076ab1fac987148, []int{12} } func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -414,7 +414,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } func (*MetricIdentifier) ProtoMessage() {} func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{13} + return fileDescriptor_1076ab1fac987148, []int{13} } func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -442,7 +442,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo func (m *MetricSpec) Reset() { *m = MetricSpec{} } func (*MetricSpec) ProtoMessage() {} func (*MetricSpec) 
Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{14} + return fileDescriptor_1076ab1fac987148, []int{14} } func (m *MetricSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -470,7 +470,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo func (m *MetricStatus) Reset() { *m = MetricStatus{} } func (*MetricStatus) ProtoMessage() {} func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{15} + return fileDescriptor_1076ab1fac987148, []int{15} } func (m *MetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -498,7 +498,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo func (m *MetricTarget) Reset() { *m = MetricTarget{} } func (*MetricTarget) ProtoMessage() {} func (*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{16} + return fileDescriptor_1076ab1fac987148, []int{16} } func (m *MetricTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +526,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } func (*MetricValueStatus) ProtoMessage() {} func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{17} + return fileDescriptor_1076ab1fac987148, []int{17} } func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -554,7 +554,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } func (*ObjectMetricSource) ProtoMessage() {} func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{18} + return fileDescriptor_1076ab1fac987148, []int{18} } func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -582,7 +582,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } func (*ObjectMetricStatus) ProtoMessage() {} func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{19} + return fileDescriptor_1076ab1fac987148, []int{19} } func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -610,7 +610,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } func (*PodsMetricSource) ProtoMessage() {} func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{20} + return fileDescriptor_1076ab1fac987148, []int{20} } func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -638,7 +638,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } func (*PodsMetricStatus) ProtoMessage() {} func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{21} + return fileDescriptor_1076ab1fac987148, []int{21} } func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -666,7 +666,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } func (*ResourceMetricSource) ProtoMessage() {} func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{22} + return 
fileDescriptor_1076ab1fac987148, []int{22} } func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -694,7 +694,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } func (*ResourceMetricStatus) ProtoMessage() {} func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{23} + return fileDescriptor_1076ab1fac987148, []int{23} } func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -747,120 +747,119 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) + proto.RegisterFile("k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_1076ab1fac987148) } -var fileDescriptor_592ad94d7d6be24f = []byte{ - // 1741 bytes of a gzipped FileDescriptorProto +var fileDescriptor_1076ab1fac987148 = []byte{ + // 1727 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, 0x15, 0xd7, 0x92, 0xd4, 0xd7, 0x50, 0x9f, 0xe3, 0x2f, 0x42, 0x86, 0x49, 0x61, 0x6b, 0xb4, 0xae, - 0x51, 0x2f, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0x5b, 0xdb, 0xb0, 0x64, 0xab, 0x43, - 0x59, 0x2d, 0x0a, 0xd9, 0xe8, 0x70, 0x77, 0x44, 0x4d, 0x45, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72, - 0x81, 0xa2, 0x08, 0x90, 0x7b, 0x90, 0x20, 0xd7, 0xfc, 0x09, 0x09, 0x7c, 0x09, 0x90, 0x63, 0x3e, + 0xd1, 0x2e, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0xdb, 0xda, 0xb0, 0x64, 0xab, 0x43, + 0x59, 0x2d, 0x02, 0xd9, 0xc8, 0x70, 0x77, 0x44, 0x4d, 0x44, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72, + 0x80, 0x20, 0x08, 0x90, 0x7b, 0x90, 0x20, 0xd7, 0xfc, 0x09, 0x09, 0x7c, 0x09, 0x90, 0x63, 0x3e, 0x60, 0x18, 0x41, 0x10, 0xf8, 0x16, 0xe7, 0x42, 0xc4, 0xcc, 0x31, 0xc7, 0xdc, 0x7c, 0x0a, 0xe6, 0x63, 0x3f, 0x49, 0x89, 0x94, 0x20, 0x29, 0xd0, 0x8d, 0x3b, 0xf3, 0xde, 0xef, 0xcd, 0x7b, 0xf3, - 0x7b, 0x6f, 0xde, 0x0c, 0xc1, 0xad, 0x9d, 0xeb, 0xae, 0x46, 0xed, 0xe2, 0x4e, 0xb3, 0x42, 0x1c, - 0x8b, 0x78, 0xc4, 0x2d, 0xb6, 0x88, 0x65, 0xda, 0x4e, 0x51, 0x4e, 0xe0, 0x06, 0x2d, 0xe2, 0xa6, - 0x67, 0xbb, 0x06, 0xae, 0x51, 0xab, 0x5a, 0x6c, 0x95, 0x2a, 0xc4, 0xc3, 0xa5, 0x62, 0x95, 0x58, - 0xc4, 0xc1, 0x1e, 0x31, 0xb5, 0x86, 0x63, 0x7b, 0x36, 0xcc, 0x0b, 0x79, 0x0d, 0x37, 0xa8, 0x16, - 0x91, 0xd7, 0xa4, 0xfc, 0xdc, 0xb5, 0x2a, 0xf5, 0xb6, 0x9b, 0x15, 0xcd, 0xb0, 0xeb, 0xc5, 0xaa, - 0x5d, 0xb5, 0x8b, 0x5c, 0xad, 0xd2, 0xdc, 0xe2, 0x5f, 0xfc, 0x83, 0xff, 0x12, 0x70, 0x73, 0x6a, - 0xc4, 0xbc, 0x61, 0x3b, 0xa4, 0xd8, 0x5a, 0x48, 0x9a, 0x9c, 0x5b, 0x0c, 0x65, 0xea, 0xd8, 0xd8, - 0xa6, 0x16, 0x71, 0x76, 0x8b, 0x8d, 0x9d, 0x2a, 0x57, 0x72, 0x88, 0x6b, 0x37, 0x1d, 0x83, 0x1c, - 0x48, 0xcb, 0x2d, 0xd6, 0x89, 0x87, 0x7b, 0xd9, 0x2a, 0xee, 0xa5, 0xe5, 0x34, 0x2d, 0x8f, 0xd6, - 0xbb, 0xcd, 0xfc, 0xa1, 0x9f, 0x82, 0x6b, 0x6c, 0x93, 0x3a, 0x4e, 0xea, 0xa9, 0x3f, 0x28, 0xe0, - 0xd2, 0xb2, 0x6d, 0x79, 0x98, 0x69, 0x20, 0xe9, 0xc4, 0x2a, 0xf1, 0x1c, 0x6a, 0x94, 0xf9, 0x6f, - 0xb8, 0x0c, 0x32, 0x16, 0xae, 0x93, 0x9c, 0x32, 0xaf, 0x5c, 0x19, 0xd7, 0x8b, 0x2f, 0xda, 0x85, - 0xa1, 0x4e, 0xbb, 0x90, 0xb9, 0x87, 0xeb, 0xe4, 0x4d, 0xbb, 0x50, 0xe8, 0x0e, 0x9c, 0xe6, 0xc3, - 0x30, 0x11, 0xc4, 0x95, 0xe1, 0x3a, 0x18, 0xf1, 0xb0, 0x53, 0x25, 0x5e, 0x2e, 0x35, 0xaf, 0x5c, - 0xc9, 0x96, 0x7e, 0xa3, 0xed, 0xbf, 0x7f, 0x9a, 0x58, 0xc2, 0x3a, 0xd7, 0xd1, 0xa7, 0xa4, 0xd1, - 0x11, 0xf1, 0x8d, 0x24, 0x16, 0x2c, 0x82, 
0x71, 0xc3, 0x5f, 0x7b, 0x2e, 0xcd, 0xd7, 0x37, 0x2b, - 0x45, 0xc7, 0x43, 0xa7, 0x42, 0x19, 0xf5, 0xc7, 0x7d, 0xbc, 0xf5, 0xb0, 0xd7, 0x74, 0x8f, 0xc6, - 0xdb, 0x4d, 0x30, 0x6a, 0x34, 0x1d, 0x87, 0x58, 0xbe, 0xbb, 0x0b, 0x83, 0xb9, 0xbb, 0x81, 0x6b, - 0x4d, 0x22, 0x16, 0xa2, 0x4f, 0x4b, 0xd3, 0xa3, 0xcb, 0x02, 0x09, 0xf9, 0x90, 0x07, 0xf7, 0xfa, - 0x7d, 0x05, 0x5c, 0x5c, 0x76, 0x6c, 0xd7, 0xdd, 0x20, 0x8e, 0x4b, 0x6d, 0xeb, 0x7e, 0xe5, 0x3f, - 0xc4, 0xf0, 0x10, 0xd9, 0x22, 0x0e, 0xb1, 0x0c, 0x02, 0xe7, 0x41, 0x66, 0x87, 0x5a, 0xa6, 0xf4, - 0x79, 0xc2, 0xf7, 0xf9, 0x2e, 0xb5, 0x4c, 0xc4, 0x67, 0x98, 0x04, 0x8f, 0x4a, 0x2a, 0x2e, 0x11, - 0x71, 0xb9, 0x04, 0x00, 0x6e, 0x50, 0x69, 0x40, 0xae, 0x0a, 0x4a, 0x39, 0xb0, 0xb4, 0x76, 0x47, - 0xce, 0xa0, 0x88, 0x94, 0xfa, 0x5c, 0x01, 0x67, 0xff, 0xfa, 0xc4, 0x23, 0x8e, 0x85, 0x6b, 0x31, - 0xca, 0xfd, 0x13, 0x8c, 0xd4, 0xf9, 0x37, 0x5f, 0x52, 0xb6, 0xf4, 0xdb, 0xc1, 0xc2, 0x77, 0xc7, - 0x24, 0x96, 0x47, 0xb7, 0x28, 0x71, 0x42, 0xc6, 0x88, 0x19, 0x24, 0xf1, 0x8e, 0x87, 0x87, 0xea, - 0xd7, 0xdd, 0x8e, 0x08, 0x36, 0x1d, 0x9f, 0x23, 0xc7, 0x4a, 0x31, 0xf5, 0x43, 0x05, 0xcc, 0xdc, - 0x5e, 0x5b, 0x2a, 0x0b, 0x88, 0x35, 0xbb, 0x46, 0x8d, 0x5d, 0x78, 0x1d, 0x64, 0xbc, 0xdd, 0x86, - 0x9f, 0x1a, 0x97, 0x7d, 0x12, 0xac, 0xef, 0x36, 0x58, 0x6a, 0x9c, 0x4d, 0xca, 0xb3, 0x71, 0xc4, - 0x35, 0xe0, 0x2f, 0xc0, 0x70, 0x8b, 0xd9, 0xe5, 0x4b, 0x1d, 0xd6, 0x27, 0xa5, 0xea, 0x30, 0x5f, - 0x0c, 0x12, 0x73, 0xf0, 0x06, 0x98, 0x6c, 0x10, 0x87, 0xda, 0x66, 0x99, 0x18, 0xb6, 0x65, 0xba, - 0x9c, 0x44, 0xc3, 0xfa, 0x39, 0x29, 0x3c, 0xb9, 0x16, 0x9d, 0x44, 0x71, 0x59, 0xf5, 0x83, 0x14, - 0x98, 0x0e, 0x17, 0x80, 0x9a, 0x35, 0xe2, 0xc2, 0x47, 0x60, 0xce, 0xf5, 0x70, 0x85, 0xd6, 0xe8, - 0x53, 0xec, 0x51, 0xdb, 0xfa, 0x07, 0xb5, 0x4c, 0xfb, 0x71, 0x1c, 0x3d, 0xdf, 0x69, 0x17, 0xe6, - 0xca, 0x7b, 0x4a, 0xa1, 0x7d, 0x10, 0xe0, 0x5d, 0x30, 0xe1, 0x92, 0x1a, 0x31, 0x3c, 0xe1, 0xaf, - 0x8c, 0xcb, 0xaf, 0x3a, 0xed, 0xc2, 0x44, 0x39, 0x32, 0xfe, 0xa6, 0x5d, 0x38, 0x13, 0x0b, 0x8c, - 0x98, 0x44, 0x31, 0x65, 0xf8, 0x08, 0x8c, 0x35, 0xd8, 0x2f, 0x4a, 0xdc, 0x5c, 0x6a, 0x3e, 0x3d, - 0x08, 0x57, 0x92, 0x01, 0xd7, 0x67, 0x64, 0xa8, 0xc6, 0xd6, 0x24, 0x12, 0x0a, 0x30, 0xd5, 0x4f, - 0x53, 0xe0, 0xc2, 0x6d, 0xdb, 0xa1, 0x4f, 0x59, 0x55, 0xa8, 0xad, 0xd9, 0xe6, 0x92, 0x44, 0x24, - 0x0e, 0xfc, 0x37, 0x18, 0x63, 0xe7, 0x90, 0x89, 0x3d, 0xdc, 0x83, 0xa7, 0xc1, 0x71, 0xa2, 0x35, - 0x76, 0xaa, 0x6c, 0xc0, 0xd5, 0x98, 0xb4, 0xd6, 0x5a, 0xd0, 0x44, 0x21, 0x59, 0x25, 0x1e, 0x0e, - 0x73, 0x3d, 0x1c, 0x43, 0x01, 0x2a, 0x7c, 0x08, 0x32, 0x6e, 0x83, 0x18, 0x92, 0xaa, 0x37, 0xfa, - 0x7a, 0xd6, 0x7b, 0xa1, 0xe5, 0x06, 0x31, 0xc2, 0xe2, 0xc3, 0xbe, 0x10, 0x87, 0x85, 0x04, 0x8c, - 0xb8, 0x9c, 0xd2, 0x7c, 0x57, 0xb3, 0xa5, 0x3f, 0x1d, 0xd6, 0x80, 0xc8, 0x8b, 0x20, 0xe7, 0xc4, - 0x37, 0x92, 0xe0, 0xea, 0x37, 0x0a, 0x28, 0xec, 0xa1, 0xa9, 0x93, 0x6d, 0xdc, 0xa2, 0xb6, 0x03, - 0x37, 0xc0, 0x28, 0x1f, 0x79, 0xd0, 0x90, 0xa1, 0x2c, 0x0e, 0xbe, 0x8d, 0x9c, 0xb6, 0x7a, 0x96, - 0x65, 0x64, 0x59, 0x60, 0x20, 0x1f, 0x0c, 0x6e, 0x82, 0x71, 0xfe, 0xf3, 0xa6, 0xfd, 0xd8, 0x92, - 0x61, 0x3c, 0x30, 0xf2, 0x24, 0x3b, 0x21, 0xca, 0x3e, 0x0a, 0x0a, 0x01, 0xd5, 0xb7, 0xd3, 0x60, - 0x7e, 0x0f, 0xcf, 0x96, 0x6d, 0xcb, 0xa4, 0x8c, 0xfc, 0xf0, 0x76, 0x2c, 0xff, 0x17, 0x13, 0xf9, - 0x7f, 0xb9, 0x9f, 0x7e, 0xa4, 0x1e, 0xac, 0x04, 0xfb, 0x95, 0x8a, 0x61, 0xc9, 0x80, 0xbf, 0x69, - 0x17, 0x7a, 0xf4, 0x63, 0x5a, 0x80, 0x14, 0xdf, 0x16, 0xd8, 0x02, 0xb0, 0x86, 0x5d, 0x6f, 0xdd, - 0xc1, 0x96, 0x2b, 0x2c, 0xd1, 0x3a, 0x91, 0x4c, 0xb8, 0x3a, 0x18, 
0x91, 0x99, 0x86, 0x3e, 0x27, - 0x57, 0x01, 0x57, 0xba, 0xd0, 0x50, 0x0f, 0x0b, 0xf0, 0x97, 0x60, 0xc4, 0x21, 0xd8, 0xb5, 0xad, - 0x5c, 0x86, 0x7b, 0x11, 0xd0, 0x06, 0xf1, 0x51, 0x24, 0x67, 0xe1, 0xaf, 0xc1, 0x68, 0x9d, 0xb8, - 0x2e, 0xae, 0x92, 0xdc, 0x30, 0x17, 0x0c, 0xea, 0xee, 0xaa, 0x18, 0x46, 0xfe, 0xbc, 0xfa, 0xad, - 0x02, 0x2e, 0xee, 0x11, 0xc7, 0x15, 0xea, 0x7a, 0x70, 0xb3, 0x2b, 0x53, 0xb5, 0xc1, 0x1c, 0x64, - 0xda, 0x3c, 0x4f, 0x83, 0x1a, 0xe1, 0x8f, 0x44, 0xb2, 0x74, 0x13, 0x0c, 0x53, 0x8f, 0xd4, 0xfd, - 0x02, 0xf4, 0xc7, 0x43, 0x66, 0x51, 0x58, 0xdf, 0xef, 0x30, 0x34, 0x24, 0x40, 0xd5, 0xe7, 0xe9, - 0x3d, 0x7d, 0x63, 0xa9, 0x0c, 0xff, 0x0b, 0xa6, 0xf8, 0x97, 0x3c, 0x5b, 0xc9, 0x96, 0xf4, 0xb0, + 0x7b, 0x6f, 0xde, 0x0c, 0x81, 0xb6, 0x73, 0xcd, 0xd5, 0xa8, 0x5d, 0xc4, 0x0d, 0x5a, 0xc4, 0x4d, + 0xcf, 0x76, 0x0d, 0x5c, 0xa3, 0x56, 0xb5, 0xd8, 0x2a, 0x55, 0x88, 0x87, 0x4b, 0xc5, 0x2a, 0xb1, + 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x0d, 0xc7, 0xf6, 0x6c, 0x98, 0x17, 0xf2, 0x1a, 0x6e, 0x50, 0x2d, + 0x22, 0xaf, 0x49, 0xf9, 0xb9, 0x3f, 0x56, 0xa9, 0xb7, 0xdd, 0xac, 0x68, 0x86, 0x5d, 0x2f, 0x56, + 0xed, 0xaa, 0x5d, 0xe4, 0x6a, 0x95, 0xe6, 0x16, 0xff, 0xe2, 0x1f, 0xfc, 0x97, 0x80, 0x9b, 0x53, + 0x23, 0xe6, 0x0d, 0xdb, 0x21, 0xc5, 0xd6, 0x42, 0xd2, 0xe4, 0xdc, 0x62, 0x28, 0x53, 0xc7, 0xc6, + 0x36, 0xb5, 0x88, 0xb3, 0x5b, 0x6c, 0xec, 0x54, 0xb9, 0x92, 0x43, 0x5c, 0xbb, 0xe9, 0x18, 0xe4, + 0x40, 0x5a, 0x6e, 0xb1, 0x4e, 0x3c, 0xdc, 0xcb, 0x56, 0x71, 0x2f, 0x2d, 0xa7, 0x69, 0x79, 0xb4, + 0xde, 0x6d, 0xe6, 0xaf, 0xfd, 0x14, 0x5c, 0x63, 0x9b, 0xd4, 0x71, 0x52, 0x4f, 0xfd, 0x49, 0x01, + 0x97, 0x96, 0x6d, 0xcb, 0xc3, 0x4c, 0x03, 0x49, 0x27, 0x56, 0x89, 0xe7, 0x50, 0xa3, 0xcc, 0x7f, + 0xc3, 0x65, 0x90, 0xb1, 0x70, 0x9d, 0xe4, 0x94, 0x79, 0xe5, 0xca, 0xb8, 0x5e, 0x7c, 0xd6, 0x2e, + 0x0c, 0x75, 0xda, 0x85, 0xcc, 0x1d, 0x5c, 0x27, 0xaf, 0xda, 0x85, 0x42, 0x77, 0xe0, 0x34, 0x1f, + 0x86, 0x89, 0x20, 0xae, 0x0c, 0xd7, 0xc1, 0x88, 0x87, 0x9d, 0x2a, 0xf1, 0x72, 0xa9, 0x79, 0xe5, + 0x4a, 0xb6, 0xf4, 0x07, 0x6d, 0xff, 0xfd, 0xd3, 0xc4, 0x12, 0xd6, 0xb9, 0x8e, 0x3e, 0x25, 0x8d, + 0x8e, 0x88, 0x6f, 0x24, 0xb1, 0x60, 0x11, 0x8c, 0x1b, 0xfe, 0xda, 0x73, 0x69, 0xbe, 0xbe, 0x59, + 0x29, 0x3a, 0x1e, 0x3a, 0x15, 0xca, 0xa8, 0x3f, 0xef, 0xe3, 0xad, 0x87, 0xbd, 0xa6, 0x7b, 0x34, + 0xde, 0x6e, 0x82, 0x51, 0xa3, 0xe9, 0x38, 0xc4, 0xf2, 0xdd, 0x5d, 0x18, 0xcc, 0xdd, 0x0d, 0x5c, + 0x6b, 0x12, 0xb1, 0x10, 0x7d, 0x5a, 0x9a, 0x1e, 0x5d, 0x16, 0x48, 0xc8, 0x87, 0x3c, 0xb8, 0xd7, + 0x1f, 0x2a, 0xe0, 0xe2, 0xb2, 0x63, 0xbb, 0xee, 0x06, 0x71, 0x5c, 0x6a, 0x5b, 0x77, 0x2b, 0x6f, + 0x10, 0xc3, 0x43, 0x64, 0x8b, 0x38, 0xc4, 0x32, 0x08, 0x9c, 0x07, 0x99, 0x1d, 0x6a, 0x99, 0xd2, + 0xe7, 0x09, 0xdf, 0xe7, 0xdb, 0xd4, 0x32, 0x11, 0x9f, 0x61, 0x12, 0x3c, 0x2a, 0xa9, 0xb8, 0x44, + 0xc4, 0xe5, 0x12, 0x00, 0xb8, 0x41, 0xa5, 0x01, 0xb9, 0x2a, 0x28, 0xe5, 0xc0, 0xd2, 0xda, 0x2d, + 0x39, 0x83, 0x22, 0x52, 0xea, 0x53, 0x05, 0x9c, 0xfd, 0xd7, 0x23, 0x8f, 0x38, 0x16, 0xae, 0xc5, + 0x28, 0xf7, 0x7f, 0x30, 0x52, 0xe7, 0xdf, 0x7c, 0x49, 0xd9, 0xd2, 0x9f, 0x06, 0x0b, 0xdf, 0x2d, + 0x93, 0x58, 0x1e, 0xdd, 0xa2, 0xc4, 0x09, 0x19, 0x23, 0x66, 0x90, 0xc4, 0x3b, 0x1e, 0x1e, 0xaa, + 0xdf, 0x76, 0x3b, 0x22, 0xd8, 0x74, 0x7c, 0x8e, 0x1c, 0x2b, 0xc5, 0xd4, 0x8f, 0x15, 0x30, 0x73, + 0x73, 0x6d, 0xa9, 0x2c, 0x20, 0xd6, 0xec, 0x1a, 0x35, 0x76, 0xe1, 0x35, 0x90, 0xf1, 0x76, 0x1b, + 0x7e, 0x6a, 0x5c, 0xf6, 0x49, 0xb0, 0xbe, 0xdb, 0x60, 0xa9, 0x71, 0x36, 0x29, 0xcf, 0xc6, 0x11, + 0xd7, 0x80, 0xbf, 0x01, 0xc3, 0x2d, 0x66, 0x97, 0x2f, 0x75, 0x58, 0x9f, 0x94, 0xaa, 0xc3, 
0x7c, + 0x31, 0x48, 0xcc, 0xc1, 0xeb, 0x60, 0xb2, 0x41, 0x1c, 0x6a, 0x9b, 0x65, 0x62, 0xd8, 0x96, 0xe9, + 0x72, 0x12, 0x0d, 0xeb, 0xe7, 0xa4, 0xf0, 0xe4, 0x5a, 0x74, 0x12, 0xc5, 0x65, 0xd5, 0x8f, 0x52, + 0x60, 0x3a, 0x5c, 0x00, 0x6a, 0xd6, 0x88, 0x0b, 0x1f, 0x80, 0x39, 0xd7, 0xc3, 0x15, 0x5a, 0xa3, + 0x8f, 0xb1, 0x47, 0x6d, 0xeb, 0x7f, 0xd4, 0x32, 0xed, 0x87, 0x71, 0xf4, 0x7c, 0xa7, 0x5d, 0x98, + 0x2b, 0xef, 0x29, 0x85, 0xf6, 0x41, 0x80, 0xb7, 0xc1, 0x84, 0x4b, 0x6a, 0xc4, 0xf0, 0x84, 0xbf, + 0x32, 0x2e, 0xbf, 0xeb, 0xb4, 0x0b, 0x13, 0xe5, 0xc8, 0xf8, 0xab, 0x76, 0xe1, 0x4c, 0x2c, 0x30, + 0x62, 0x12, 0xc5, 0x94, 0xe1, 0x03, 0x30, 0xd6, 0x60, 0xbf, 0x28, 0x71, 0x73, 0xa9, 0xf9, 0xf4, + 0x20, 0x5c, 0x49, 0x06, 0x5c, 0x9f, 0x91, 0xa1, 0x1a, 0x5b, 0x93, 0x48, 0x28, 0xc0, 0x54, 0x3f, + 0x4f, 0x81, 0x0b, 0x37, 0x6d, 0x87, 0x3e, 0x66, 0x55, 0xa1, 0xb6, 0x66, 0x9b, 0x4b, 0x12, 0x91, + 0x38, 0xf0, 0x75, 0x30, 0xc6, 0xce, 0x21, 0x13, 0x7b, 0xb8, 0x07, 0x4f, 0x83, 0xe3, 0x44, 0x6b, + 0xec, 0x54, 0xd9, 0x80, 0xab, 0x31, 0x69, 0xad, 0xb5, 0xa0, 0x89, 0x42, 0xb2, 0x4a, 0x3c, 0x1c, + 0xe6, 0x7a, 0x38, 0x86, 0x02, 0x54, 0x78, 0x1f, 0x64, 0xdc, 0x06, 0x31, 0x24, 0x55, 0xaf, 0xf7, + 0xf5, 0xac, 0xf7, 0x42, 0xcb, 0x0d, 0x62, 0x84, 0xc5, 0x87, 0x7d, 0x21, 0x0e, 0x0b, 0x09, 0x18, + 0x71, 0x39, 0xa5, 0xf9, 0xae, 0x66, 0x4b, 0x7f, 0x3f, 0xac, 0x01, 0x91, 0x17, 0x41, 0xce, 0x89, + 0x6f, 0x24, 0xc1, 0xd5, 0xef, 0x14, 0x50, 0xd8, 0x43, 0x53, 0x27, 0xdb, 0xb8, 0x45, 0x6d, 0x07, + 0x6e, 0x80, 0x51, 0x3e, 0x72, 0xaf, 0x21, 0x43, 0x59, 0x1c, 0x7c, 0x1b, 0x39, 0x6d, 0xf5, 0x2c, + 0xcb, 0xc8, 0xb2, 0xc0, 0x40, 0x3e, 0x18, 0xdc, 0x04, 0xe3, 0xfc, 0xe7, 0x0d, 0xfb, 0xa1, 0x25, + 0xc3, 0x78, 0x60, 0xe4, 0x49, 0x76, 0x42, 0x94, 0x7d, 0x14, 0x14, 0x02, 0xaa, 0xef, 0xa6, 0xc1, + 0xfc, 0x1e, 0x9e, 0x2d, 0xdb, 0x96, 0x49, 0x19, 0xf9, 0xe1, 0xcd, 0x58, 0xfe, 0x2f, 0x26, 0xf2, + 0xff, 0x72, 0x3f, 0xfd, 0x48, 0x3d, 0x58, 0x09, 0xf6, 0x2b, 0x15, 0xc3, 0x92, 0x01, 0x7f, 0xd5, + 0x2e, 0xf4, 0xe8, 0xc7, 0xb4, 0x00, 0x29, 0xbe, 0x2d, 0xb0, 0x05, 0x60, 0x0d, 0xbb, 0xde, 0xba, + 0x83, 0x2d, 0x57, 0x58, 0xa2, 0x75, 0x22, 0x99, 0x70, 0x75, 0x30, 0x22, 0x33, 0x0d, 0x7d, 0x4e, + 0xae, 0x02, 0xae, 0x74, 0xa1, 0xa1, 0x1e, 0x16, 0xe0, 0x6f, 0xc1, 0x88, 0x43, 0xb0, 0x6b, 0x5b, + 0xb9, 0x0c, 0xf7, 0x22, 0xa0, 0x0d, 0xe2, 0xa3, 0x48, 0xce, 0xc2, 0xdf, 0x83, 0xd1, 0x3a, 0x71, + 0x5d, 0x5c, 0x25, 0xb9, 0x61, 0x2e, 0x18, 0xd4, 0xdd, 0x55, 0x31, 0x8c, 0xfc, 0x79, 0xf5, 0x7b, + 0x05, 0x5c, 0xdc, 0x23, 0x8e, 0x2b, 0xd4, 0xf5, 0xe0, 0x66, 0x57, 0xa6, 0x6a, 0x83, 0x39, 0xc8, + 0xb4, 0x79, 0x9e, 0x06, 0x35, 0xc2, 0x1f, 0x89, 0x64, 0xe9, 0x26, 0x18, 0xa6, 0x1e, 0xa9, 0xfb, + 0x05, 0xe8, 0x6f, 0x87, 0xcc, 0xa2, 0xb0, 0xbe, 0xdf, 0x62, 0x68, 0x48, 0x80, 0xaa, 0x4f, 0xd3, + 0x7b, 0xfa, 0xc6, 0x52, 0x19, 0xbe, 0x09, 0xa6, 0xf8, 0x97, 0x3c, 0x5b, 0xc9, 0x96, 0xf4, 0xb0, 0x6f, 0xb5, 0xd8, 0xa7, 0xb5, 0xd1, 0xcf, 0xcb, 0xa5, 0x4c, 0x95, 0x63, 0xd0, 0x28, 0x61, 0x0a, 0x2e, 0x80, 0x6c, 0x9d, 0x5a, 0x88, 0x34, 0x6a, 0xd4, 0xc0, 0xae, 0x3c, 0xa7, 0xa6, 0x3b, 0xed, - 0x42, 0x76, 0x35, 0x1c, 0x46, 0x51, 0x19, 0xf8, 0x7b, 0x90, 0xad, 0xe3, 0x27, 0x81, 0x8a, 0x38, - 0x4f, 0xce, 0x48, 0x7b, 0xd9, 0xd5, 0x70, 0x0a, 0x45, 0xe5, 0xe0, 0x03, 0xc6, 0x06, 0x76, 0x12, + 0x42, 0x76, 0x35, 0x1c, 0x46, 0x51, 0x19, 0xf8, 0x17, 0x90, 0xad, 0xe3, 0x47, 0x81, 0x8a, 0x38, + 0x4f, 0xce, 0x48, 0x7b, 0xd9, 0xd5, 0x70, 0x0a, 0x45, 0xe5, 0xe0, 0x3d, 0xc6, 0x06, 0x76, 0x12, 0xbb, 0xb9, 0x0c, 0x0f, 0xf3, 0xd5, 0xc1, 0x0e, 0x6e, 0x5e, 0xfc, 0x22, 0xcc, 0xe1, 0x10, 0xc8, - 0xc7, 0x82, 0x14, 0x8c, 
0x55, 0x64, 0x0d, 0xe2, 0x2c, 0xcb, 0x96, 0xfe, 0x7c, 0xd8, 0xed, 0x93, - 0x30, 0xfa, 0x04, 0xa3, 0x89, 0xff, 0x85, 0x02, 0x78, 0xf5, 0xe3, 0x0c, 0xb8, 0xb4, 0x6f, 0x01, - 0x85, 0x7f, 0x03, 0xd0, 0xae, 0xb8, 0xc4, 0x69, 0x11, 0xf3, 0x96, 0xb8, 0x6f, 0xb0, 0xa6, 0x90, - 0x6d, 0x67, 0x5a, 0x3f, 0xcf, 0x32, 0xec, 0x7e, 0xd7, 0x2c, 0xea, 0xa1, 0x01, 0x0d, 0x30, 0xc9, - 0xf2, 0x4e, 0xec, 0x1d, 0x95, 0xfd, 0xe7, 0xc1, 0x92, 0x7a, 0x96, 0xb5, 0x0e, 0x2b, 0x51, 0x10, - 0x14, 0xc7, 0x84, 0x4b, 0x60, 0x5a, 0xb6, 0x3d, 0x89, 0xbd, 0xbc, 0x20, 0x83, 0x3d, 0xbd, 0x1c, - 0x9f, 0x46, 0x49, 0x79, 0x06, 0x61, 0x12, 0x97, 0x3a, 0xc4, 0x0c, 0x20, 0x32, 0x71, 0x88, 0x9b, - 0xf1, 0x69, 0x94, 0x94, 0x87, 0x35, 0x30, 0x25, 0x51, 0xe5, 0xd6, 0xe6, 0x86, 0x39, 0x3b, 0x06, - 0x6c, 0x50, 0xe5, 0xc9, 0x15, 0xd0, 0x7d, 0x39, 0x86, 0x85, 0x12, 0xd8, 0xd0, 0x03, 0xc0, 0xf0, - 0xab, 0xa9, 0x9b, 0x1b, 0xe1, 0x96, 0xfe, 0x72, 0x48, 0xbe, 0x04, 0x65, 0x39, 0xec, 0x01, 0x82, - 0x21, 0x17, 0x45, 0xec, 0xa8, 0xef, 0x29, 0x60, 0x26, 0xd9, 0xe0, 0x06, 0x57, 0x0b, 0x65, 0xcf, - 0xab, 0xc5, 0x43, 0x30, 0x26, 0x5a, 0x25, 0xdb, 0x91, 0x04, 0xf8, 0xdd, 0x80, 0x45, 0x0f, 0x57, - 0x48, 0xad, 0x2c, 0x55, 0x05, 0x9d, 0xfd, 0x2f, 0x14, 0x40, 0xaa, 0x1f, 0x65, 0x00, 0x08, 0x53, - 0x0c, 0x2e, 0xc6, 0x4e, 0xb9, 0xf9, 0xc4, 0x29, 0x37, 0x13, 0xbd, 0xa7, 0x44, 0x4e, 0xb4, 0x0d, - 0x30, 0x62, 0xf3, 0xd2, 0x23, 0x57, 0x58, 0xea, 0x17, 0xcc, 0xa0, 0x4d, 0x0a, 0xd0, 0x74, 0xc0, - 0xce, 0x0e, 0x59, 0xc0, 0x24, 0x1a, 0xbc, 0x07, 0x32, 0x0d, 0xdb, 0xf4, 0xfb, 0x9a, 0xbe, 0x2d, - 0xe1, 0x9a, 0x6d, 0xba, 0x31, 0xcc, 0x31, 0xb6, 0x76, 0x36, 0x8a, 0x38, 0x0e, 0x6b, 0x33, 0xfd, - 0x97, 0x0a, 0x4e, 0xd1, 0x6c, 0x69, 0xb1, 0x1f, 0x66, 0xaf, 0x47, 0x01, 0x11, 0x4c, 0x7f, 0x06, - 0x05, 0x98, 0xf0, 0x2d, 0x05, 0xcc, 0x1a, 0xc9, 0x0b, 0x76, 0x6e, 0x74, 0xb0, 0xae, 0x6c, 0xdf, - 0x77, 0x08, 0xfd, 0x5c, 0xa7, 0x5d, 0x98, 0xed, 0x12, 0x41, 0xdd, 0xe6, 0x98, 0x93, 0x44, 0xde, - 0xc6, 0x64, 0x2d, 0xec, 0xeb, 0x64, 0xaf, 0x6b, 0xa8, 0x70, 0xd2, 0x9f, 0x41, 0x01, 0xa6, 0xfa, - 0x2c, 0x03, 0x26, 0x62, 0xd7, 0xbc, 0x9f, 0x83, 0x33, 0x22, 0xe1, 0x8f, 0x96, 0x33, 0x02, 0xf3, - 0xe8, 0x39, 0x23, 0x70, 0x4f, 0x94, 0x33, 0xc2, 0xe4, 0x49, 0x72, 0x26, 0xe2, 0x64, 0x0f, 0xce, - 0x7c, 0x9e, 0xf2, 0x39, 0x23, 0x9a, 0x8e, 0xc1, 0x38, 0x23, 0x64, 0x23, 0x9c, 0xb9, 0x1f, 0xbd, - 0x49, 0xf7, 0xe9, 0xfe, 0x34, 0x3f, 0xc2, 0xda, 0xdf, 0x9b, 0xd8, 0xf2, 0xa8, 0xb7, 0xab, 0x8f, - 0x77, 0xdd, 0xba, 0x4d, 0x30, 0x81, 0x5b, 0xc4, 0xc1, 0x55, 0xc2, 0x87, 0x25, 0x69, 0x0e, 0x8a, - 0x3b, 0xc3, 0x2e, 0xbd, 0x4b, 0x11, 0x1c, 0x14, 0x43, 0x65, 0x0d, 0x81, 0xfc, 0x7e, 0xe0, 0x05, - 0xb7, 0x69, 0x79, 0x46, 0xf2, 0x86, 0x60, 0xa9, 0x6b, 0x16, 0xf5, 0xd0, 0x50, 0xdf, 0x4d, 0x81, - 0xd9, 0xae, 0x77, 0x8c, 0x30, 0x28, 0xca, 0x31, 0x05, 0x25, 0x75, 0x82, 0x41, 0x49, 0x1f, 0x38, - 0x28, 0x5f, 0xa4, 0x00, 0xec, 0x3e, 0x4e, 0xe0, 0xff, 0x78, 0x53, 0x62, 0x38, 0xb4, 0x42, 0x4c, - 0x31, 0x7d, 0x14, 0x0d, 0x75, 0xb4, 0xa3, 0x89, 0x62, 0xa3, 0xa4, 0xb1, 0x63, 0x7a, 0xf2, 0x0d, - 0x5f, 0xd4, 0xd2, 0x47, 0xfb, 0xa2, 0xa6, 0x7e, 0x95, 0x0c, 0xe3, 0xa9, 0x7e, 0xc2, 0xeb, 0xb5, - 0xfd, 0xe9, 0x13, 0xdc, 0x7e, 0xf5, 0x33, 0x05, 0xcc, 0x24, 0xdb, 0x91, 0x53, 0xf7, 0xb0, 0xfb, - 0x65, 0xdc, 0x89, 0xd3, 0xfd, 0xa8, 0xfb, 0x4c, 0x01, 0x67, 0x4f, 0xd9, 0x3f, 0x3c, 0xea, 0x27, - 0xdd, 0x6b, 0x3e, 0x2d, 0xff, 0xd3, 0xe8, 0xd7, 0x5e, 0xbc, 0xce, 0x0f, 0xbd, 0x7c, 0x9d, 0x1f, - 0x7a, 0xf5, 0x3a, 0x3f, 0xf4, 0xff, 0x4e, 0x5e, 0x79, 0xd1, 0xc9, 0x2b, 0x2f, 0x3b, 0x79, 0xe5, - 0x55, 0x27, 0xaf, 0x7c, 0xd7, 0xc9, 0x2b, 0xef, 
0x7c, 0x9f, 0x1f, 0xfa, 0xd7, 0xa8, 0x84, 0xfe, - 0x29, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x79, 0x7e, 0xc9, 0x1b, 0x1d, 0x00, 0x00, + 0xc7, 0x82, 0x14, 0x8c, 0x55, 0x64, 0x0d, 0xe2, 0x2c, 0xcb, 0x96, 0xfe, 0x71, 0xd8, 0xed, 0x93, + 0x30, 0xfa, 0x04, 0xa3, 0x89, 0xff, 0x85, 0x02, 0x78, 0xf5, 0xd3, 0x0c, 0xb8, 0xb4, 0x6f, 0x01, + 0x85, 0xff, 0x06, 0xd0, 0xae, 0xb8, 0xc4, 0x69, 0x11, 0xf3, 0x3f, 0xe2, 0xbe, 0xc1, 0x9a, 0x42, + 0xb6, 0x9d, 0x69, 0xfd, 0x3c, 0xcb, 0xb0, 0xbb, 0x5d, 0xb3, 0xa8, 0x87, 0x06, 0x34, 0xc0, 0x24, + 0xcb, 0x3b, 0xb1, 0x77, 0x54, 0xf6, 0x9f, 0x07, 0x4b, 0xea, 0x59, 0xd6, 0x3a, 0xac, 0x44, 0x41, + 0x50, 0x1c, 0x13, 0x2e, 0x81, 0x69, 0xd9, 0xf6, 0x24, 0xf6, 0xf2, 0x82, 0x0c, 0xf6, 0xf4, 0x72, + 0x7c, 0x1a, 0x25, 0xe5, 0x19, 0x84, 0x49, 0x5c, 0xea, 0x10, 0x33, 0x80, 0xc8, 0xc4, 0x21, 0x6e, + 0xc4, 0xa7, 0x51, 0x52, 0x1e, 0xd6, 0xc0, 0x94, 0x44, 0x95, 0x5b, 0x9b, 0x1b, 0xe6, 0xec, 0x18, + 0xb0, 0x41, 0x95, 0x27, 0x57, 0x40, 0xf7, 0xe5, 0x18, 0x16, 0x4a, 0x60, 0x43, 0x0f, 0x00, 0xc3, + 0xaf, 0xa6, 0x6e, 0x6e, 0x84, 0x5b, 0xfa, 0xe7, 0x21, 0xf9, 0x12, 0x94, 0xe5, 0xb0, 0x07, 0x08, + 0x86, 0x5c, 0x14, 0xb1, 0xa3, 0x7e, 0xa0, 0x80, 0x99, 0x64, 0x83, 0x1b, 0x5c, 0x2d, 0x94, 0x3d, + 0xaf, 0x16, 0xf7, 0xc1, 0x98, 0x68, 0x95, 0x6c, 0x47, 0x12, 0xe0, 0xcf, 0x03, 0x16, 0x3d, 0x5c, + 0x21, 0xb5, 0xb2, 0x54, 0x15, 0x74, 0xf6, 0xbf, 0x50, 0x00, 0xa9, 0x7e, 0x92, 0x01, 0x20, 0x4c, + 0x31, 0xb8, 0x18, 0x3b, 0xe5, 0xe6, 0x13, 0xa7, 0xdc, 0x4c, 0xf4, 0x9e, 0x12, 0x39, 0xd1, 0x36, + 0xc0, 0x88, 0xcd, 0x4b, 0x8f, 0x5c, 0x61, 0xa9, 0x5f, 0x30, 0x83, 0x36, 0x29, 0x40, 0xd3, 0x01, + 0x3b, 0x3b, 0x64, 0x01, 0x93, 0x68, 0xf0, 0x0e, 0xc8, 0x34, 0x6c, 0xd3, 0xef, 0x6b, 0xfa, 0xb6, + 0x84, 0x6b, 0xb6, 0xe9, 0xc6, 0x30, 0xc7, 0xd8, 0xda, 0xd9, 0x28, 0xe2, 0x38, 0xac, 0xcd, 0xf4, + 0x5f, 0x2a, 0x38, 0x45, 0xb3, 0xa5, 0xc5, 0x7e, 0x98, 0xbd, 0x1e, 0x05, 0x44, 0x30, 0xfd, 0x19, + 0x14, 0x60, 0xc2, 0x77, 0x14, 0x30, 0x6b, 0x24, 0x2f, 0xd8, 0xb9, 0xd1, 0xc1, 0xba, 0xb2, 0x7d, + 0xdf, 0x21, 0xf4, 0x73, 0x9d, 0x76, 0x61, 0xb6, 0x4b, 0x04, 0x75, 0x9b, 0x63, 0x4e, 0x12, 0x79, + 0x1b, 0x93, 0xb5, 0xb0, 0xaf, 0x93, 0xbd, 0xae, 0xa1, 0xc2, 0x49, 0x7f, 0x06, 0x05, 0x98, 0xea, + 0x93, 0x0c, 0x98, 0x88, 0x5d, 0xf3, 0x7e, 0x0d, 0xce, 0x88, 0x84, 0x3f, 0x5a, 0xce, 0x08, 0xcc, + 0xa3, 0xe7, 0x8c, 0xc0, 0x3d, 0x51, 0xce, 0x08, 0x93, 0x27, 0xc9, 0x99, 0x88, 0x93, 0x3d, 0x38, + 0xf3, 0x65, 0xca, 0xe7, 0x8c, 0x68, 0x3a, 0x06, 0xe3, 0x8c, 0x90, 0x8d, 0x70, 0xe6, 0x6e, 0xf4, + 0x26, 0xdd, 0xa7, 0xfb, 0xd3, 0xfc, 0x08, 0x6b, 0xff, 0x6d, 0x62, 0xcb, 0xa3, 0xde, 0xae, 0x3e, + 0xde, 0x75, 0xeb, 0x36, 0xc1, 0x04, 0x6e, 0x11, 0x07, 0x57, 0x09, 0x1f, 0x96, 0xa4, 0x39, 0x28, + 0xee, 0x0c, 0xbb, 0xf4, 0x2e, 0x45, 0x70, 0x50, 0x0c, 0x95, 0x35, 0x04, 0xf2, 0xfb, 0x9e, 0x17, + 0xdc, 0xa6, 0xe5, 0x19, 0xc9, 0x1b, 0x82, 0xa5, 0xae, 0x59, 0xd4, 0x43, 0x43, 0x7d, 0x3f, 0x05, + 0x66, 0xbb, 0xde, 0x31, 0xc2, 0xa0, 0x28, 0xc7, 0x14, 0x94, 0xd4, 0x09, 0x06, 0x25, 0x7d, 0xe0, + 0xa0, 0x7c, 0x95, 0x02, 0xb0, 0xfb, 0x38, 0x81, 0x6f, 0xf1, 0xa6, 0xc4, 0x70, 0x68, 0x85, 0x98, + 0x62, 0xfa, 0x28, 0x1a, 0xea, 0x68, 0x47, 0x13, 0xc5, 0x46, 0x49, 0x63, 0xc7, 0xf4, 0xe4, 0x1b, + 0xbe, 0xa8, 0xa5, 0x8f, 0xf6, 0x45, 0x4d, 0xfd, 0x26, 0x19, 0xc6, 0x53, 0xfd, 0x84, 0xd7, 0x6b, + 0xfb, 0xd3, 0x27, 0xb8, 0xfd, 0xea, 0x17, 0x0a, 0x98, 0x49, 0xb6, 0x23, 0xa7, 0xee, 0x61, 0xf7, + 0xeb, 0xb8, 0x13, 0xa7, 0xfb, 0x51, 0xf7, 0x89, 0x02, 0xce, 0x9e, 0xb2, 0x7f, 0x78, 0xd4, 0xcf, + 0xba, 0xd7, 0x7c, 0x5a, 0xfe, 0xa7, 0xd1, 0x6f, 0x3c, 0x7b, 0x99, 0x1f, 0x7a, 0xfe, 0x32, 
0x3f, + 0xf4, 0xe2, 0x65, 0x7e, 0xe8, 0xed, 0x4e, 0x5e, 0x79, 0xd6, 0xc9, 0x2b, 0xcf, 0x3b, 0x79, 0xe5, + 0x45, 0x27, 0xaf, 0xfc, 0xd0, 0xc9, 0x2b, 0xef, 0xfd, 0x98, 0x1f, 0x7a, 0x2d, 0xbf, 0xff, 0x1f, + 0x9f, 0xbf, 0x04, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x27, 0xde, 0xc0, 0x19, 0x1d, 0x00, 0x00, } func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 7dee14470..c88fc1fe2 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v2beta2"; +option go_package = "k8s.io/api/autoscaling/v2beta2"; // ContainerResourceMetricSource indicates how to scale on a resource metric known to // Kubernetes, as specified in requests and limits, describing each pod in the @@ -54,25 +54,25 @@ message ContainerResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ContainerResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric optional MetricValueStatus current = 2; - // Container is the name of the container in the pods of the scaling target + // container is the name of the container in the pods of the scaling target optional string container = 3; } // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 2; - // API version of the referent + // apiVersion is the API version of the referent // +optional optional string apiVersion = 3; } @@ -100,14 +100,14 @@ message ExternalMetricStatus { // HPAScalingPolicy is a single policy which must hold true for a specified past interval. message HPAScalingPolicy { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. optional string type = 1; - // Value contains the amount of change which is permitted by the policy. + // value contains the amount of change which is permitted by the policy. // It must be greater than zero optional int32 value = 2; - // PeriodSeconds specifies the window of time for which the policy should hold true. + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). optional int32 periodSeconds = 3; } @@ -119,7 +119,7 @@ message HPAScalingPolicy { // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. 
message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -136,6 +136,7 @@ message HPAScalingRules { // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional + // +listType=atomic repeated HPAScalingPolicy policies = 2; } @@ -146,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -189,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -205,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -238,6 +239,7 @@ message HorizontalPodAutoscalerSpec { // more information about how each type of metric must respond. // If not set, the default metric will be set to 80% average CPU utilization. // +optional + // +listType=atomic repeated MetricSpec metrics = 4; // behavior configures the scaling behavior of the target @@ -256,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -268,11 +270,13 @@ message HorizontalPodAutoscalerStatus { // currentMetrics is the last read state of the metrics used by this autoscaler. // +optional + // +listType=atomic repeated MetricStatus currentMetrics = 5; // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic repeated HorizontalPodAutoscalerCondition conditions = 6; } @@ -285,7 +289,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. 
// When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -361,7 +365,7 @@ message MetricStatus { // +optional optional ResourceMetricStatus resource = 4; - // container resource refers to a resource metric (such as those specified in + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available @@ -385,12 +389,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -404,14 +408,14 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; - // currentAverageUtilization is the current value of the average of the + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional @@ -485,7 +489,7 @@ message ResourceMetricSource { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. message ResourceMetricStatus { - // Name is the name of the resource in question. + // name is the name of the resource in question. optional string name = 1; // current contains the current value for the given metric diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 10bdec5b9..2fee0b8a0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -62,9 +62,11 @@ type HorizontalPodAutoscalerSpec struct { // available. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. // It cannot be less that minReplicas. MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the // desired replica count (the maximum replica count across all metrics will // be used). 
The desired replica count is calculated multiplying the @@ -74,6 +76,7 @@ type HorizontalPodAutoscalerSpec struct { // more information about how each type of metric must respond. // If not set, the default metric will be set to 80% average CPU utilization. // +optional + // +listType=atomic Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` // behavior configures the scaling behavior of the target @@ -85,11 +88,13 @@ type HorizontalPodAutoscalerSpec struct { // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + + // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent + + // apiVersion is the API version of the referent // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` } @@ -107,11 +112,13 @@ type MetricSpec struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. // +optional Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to @@ -119,6 +126,7 @@ type MetricSpec struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // container resource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in // each pod of the current scale target (e.g. CPU or memory). Such metrics are @@ -127,6 +135,7 @@ type MetricSpec struct { // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -146,6 +155,7 @@ type HorizontalPodAutoscalerBehavior struct { // No stabilization is used. // +optional ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. 
// If not set, the default value is to allow to scale down to minReplicas pods, with a // 300 second stabilization window (i.e., the highest recommendation for @@ -173,7 +183,7 @@ const ( // number of replicas is not set instantly, instead, the safest value from the stabilization // window is chosen. type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // stabilizationWindowSeconds is the number of seconds for which past recommendations should be // considered while scaling up or scaling down. // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). // If not set, use the default values: @@ -181,13 +191,16 @@ type HPAScalingRules struct { // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). // +optional StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. // If not set, the default value MaxPolicySelect is used. // +optional SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid // +optional + // +listType=atomic Policies []HPAScalingPolicy `json:"policies,omitempty" protobuf:"bytes,2,rep,name=policies"` } @@ -204,12 +217,14 @@ const ( // HPAScalingPolicy is a single policy which must hold true for a specified past interval. type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. + // type is used to specify the scaling policy. Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. + + // value contains the amount of change which is permitted by the policy. // It must be greater than zero Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. + + // periodSeconds specifies the window of time for which the policy should hold true. // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` } @@ -251,6 +266,7 @@ type ObjectMetricSource struct { DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` } @@ -262,6 +278,7 @@ type ObjectMetricSource struct { type PodsMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -276,6 +293,7 @@ type PodsMetricSource struct { type ResourceMetricSource struct { // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -290,8 +308,10 @@ type ResourceMetricSource struct { type ContainerResourceMetricSource struct { // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -302,6 +322,7 @@ type ContainerResourceMetricSource struct { type ExternalMetricSource struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` } @@ -310,6 +331,7 @@ type ExternalMetricSource struct { type MetricIdentifier struct { // name is the name of the given metric Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. @@ -321,13 +343,16 @@ type MetricIdentifier struct { type MetricTarget struct { // type represents whether the metric type is Utilization, Value, or AverageValue Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. @@ -370,11 +395,13 @@ type HorizontalPodAutoscalerStatus struct { // currentMetrics is the last read state of the metrics used by this autoscaler. // +optional + // +listType=atomic CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` // conditions is the set of conditions required for this autoscaler to scale its target, // and indicates whether or not those conditions are met. // +optional + // +listType=atomic Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` } @@ -399,15 +426,19 @@ const ( type HorizontalPodAutoscalerCondition struct { // type describes the current condition Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. 
// +optional Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about // the transition // +optional @@ -426,6 +457,7 @@ type MetricStatus struct { // (for example, hits-per-second on an Ingress object). // +optional Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target // (for example, transactions-processed-per-second). The values will be // averaged together before being compared to the target value. @@ -438,13 +470,15 @@ type MetricStatus struct { // to normal per-pod metrics using the "pods" source. // +optional Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // container resource refers to a resource metric (such as those specified in + + // containerResource refers to a resource metric (such as those specified in // requests and limits) known to Kubernetes describing a single container in each pod in the // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. // +optional ContainerResource *ContainerResourceMetricStatus `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` + // external refers to a global metric that is not associated // with any Kubernetes object. It allows autoscaling based on information // coming from components running outside of cluster @@ -459,6 +493,7 @@ type MetricStatus struct { type ObjectMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` @@ -470,6 +505,7 @@ type ObjectMetricStatus struct { type PodsMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -480,8 +516,9 @@ type PodsMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -492,11 +529,13 @@ type ResourceMetricStatus struct { // Kubernetes, and have special scaling options on top of those available to // normal per-pod metrics using the "pods" source. type ContainerResourceMetricStatus struct { - // Name is the name of the resource in question. + // name is the name of the resource in question. 
Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - // Container is the name of the container in the pods of the scaling target + + // container is the name of the container in the pods of the scaling target Container string `json:"container" protobuf:"bytes,3,opt,name=container"` } @@ -505,6 +544,7 @@ type ContainerResourceMetricStatus struct { type ExternalMetricStatus struct { // metric identifies the target metric by name and selector Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` } @@ -514,11 +554,13 @@ type MetricValueStatus struct { // value is the current value of the metric (as a quantity). // +optional Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` - // currentAverageUtilization is the current value of the average of the + + // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of // the requested value of the resource for the pods. // +optional diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index e3ea3002b..4af7d0ec0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v2beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ContainerResourceMetricSource = map[string]string{ @@ -40,9 +40,9 @@ func (ContainerResourceMetricSource) SwaggerDoc() map[string]string { var map_ContainerResourceMetricStatus = map[string]string{ "": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", - "container": "Container is the name of the container in the pods of the scaling target", + "container": "container is the name of the container in the pods of the scaling target", } func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { @@ -51,9 +51,9 @@ func (ContainerResourceMetricStatus) SwaggerDoc() map[string]string { var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", + "kind": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "apiVersion": "apiVersion is the API version of the referent", } func (CrossVersionObjectReference) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HPAScalingPolicy = map[string]string{ "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "type is used to specify the scaling policy.", + "value": "value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", } func (HPAScalingPolicy) SwaggerDoc() map[string]string { @@ -93,7 +93,7 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string { var map_HPAScalingRules = map[string]string{ "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. 
the stabilization window is 300 seconds long).", + "stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", } @@ -203,7 +203,7 @@ var map_MetricStatus = map[string]string{ "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -227,7 +227,7 @@ var map_MetricValueStatus = map[string]string{ "": "MetricValueStatus holds the current value for a metric", "value": "value is the current value of the metric (as a quantity).", "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "averageUtilization": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", } func (MetricValueStatus) SwaggerDoc() map[string]string { @@ -286,7 +286,7 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { var map_ResourceMetricStatus = map[string]string{ "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", + "name": "name is the name of the resource in question.", "current": "current contains the current value for the given metric", } diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index c4a8db6e7..cb5cbb600 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -17,5 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index a33edf5b7..6108a6083 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto +// source: k8s.io/api/batch/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CronJob) Reset() { *m = CronJob{} } func (*CronJob) ProtoMessage() {} func (*CronJob) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{0} + return fileDescriptor_79228dc2c4001a22, []int{0} } func (m *CronJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_CronJob proto.InternalMessageInfo func (m *CronJobList) Reset() { *m = CronJobList{} } func (*CronJobList) ProtoMessage() {} func (*CronJobList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{1} + return fileDescriptor_79228dc2c4001a22, []int{1} } func (m *CronJobList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_CronJobList proto.InternalMessageInfo func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } func (*CronJobSpec) ProtoMessage() {} func (*CronJobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{2} + return fileDescriptor_79228dc2c4001a22, []int{2} } func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } func (*CronJobStatus) ProtoMessage() {} func (*CronJobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{3} + return fileDescriptor_79228dc2c4001a22, []int{3} } func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo func (m *Job) Reset() { *m = Job{} } func (*Job) ProtoMessage() {} func (*Job) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{4} + return fileDescriptor_79228dc2c4001a22, []int{4} } func (m *Job) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_Job proto.InternalMessageInfo func (m *JobCondition) Reset() { *m = JobCondition{} } func (*JobCondition) ProtoMessage() {} func (*JobCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{5} + return fileDescriptor_79228dc2c4001a22, []int{5} } func (m *JobCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_JobCondition proto.InternalMessageInfo func (m *JobList) Reset() { *m = JobList{} } func (*JobList) ProtoMessage() {} func (*JobList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{6} + return fileDescriptor_79228dc2c4001a22, []int{6} } func (m *JobList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_JobList proto.InternalMessageInfo func (m *JobSpec) Reset() { *m = JobSpec{} } func (*JobSpec) ProtoMessage() {} func (*JobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{7} + return fileDescriptor_79228dc2c4001a22, []int{7} } func (m *JobSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_JobSpec proto.InternalMessageInfo func (m *JobStatus) Reset() { *m = JobStatus{} } func (*JobStatus) ProtoMessage() {} func (*JobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{8} + return fileDescriptor_79228dc2c4001a22, 
[]int{8} } func (m *JobStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_JobStatus proto.InternalMessageInfo func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{9} + return fileDescriptor_79228dc2c4001a22, []int{9} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,10 +328,182 @@ func (m *JobTemplateSpec) XXX_DiscardUnknown() { var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo +func (m *PodFailurePolicy) Reset() { *m = PodFailurePolicy{} } +func (*PodFailurePolicy) ProtoMessage() {} +func (*PodFailurePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{10} +} +func (m *PodFailurePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodFailurePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicy.Merge(m, src) +} +func (m *PodFailurePolicy) XXX_Size() int { + return m.Size() +} +func (m *PodFailurePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicy proto.InternalMessageInfo + +func (m *PodFailurePolicyOnExitCodesRequirement) Reset() { + *m = PodFailurePolicyOnExitCodesRequirement{} +} +func (*PodFailurePolicyOnExitCodesRequirement) ProtoMessage() {} +func (*PodFailurePolicyOnExitCodesRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{11} +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement.Merge(m, src) +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Size() int { + return m.Size() +} +func (m *PodFailurePolicyOnExitCodesRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement proto.InternalMessageInfo + +func (m *PodFailurePolicyOnPodConditionsPattern) Reset() { + *m = PodFailurePolicyOnPodConditionsPattern{} +} +func (*PodFailurePolicyOnPodConditionsPattern) ProtoMessage() {} +func (*PodFailurePolicyOnPodConditionsPattern) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{12} +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern.Merge(m, src) +} +func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Size() int { + return m.Size() +} +func (m 
*PodFailurePolicyOnPodConditionsPattern) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern proto.InternalMessageInfo + +func (m *PodFailurePolicyRule) Reset() { *m = PodFailurePolicyRule{} } +func (*PodFailurePolicyRule) ProtoMessage() {} +func (*PodFailurePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{13} +} +func (m *PodFailurePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodFailurePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodFailurePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodFailurePolicyRule.Merge(m, src) +} +func (m *PodFailurePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PodFailurePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PodFailurePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PodFailurePolicyRule proto.InternalMessageInfo + +func (m *SuccessPolicy) Reset() { *m = SuccessPolicy{} } +func (*SuccessPolicy) ProtoMessage() {} +func (*SuccessPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{14} +} +func (m *SuccessPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SuccessPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuccessPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuccessPolicy.Merge(m, src) +} +func (m *SuccessPolicy) XXX_Size() int { + return m.Size() +} +func (m *SuccessPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_SuccessPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_SuccessPolicy proto.InternalMessageInfo + +func (m *SuccessPolicyRule) Reset() { *m = SuccessPolicyRule{} } +func (*SuccessPolicyRule) ProtoMessage() {} +func (*SuccessPolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_79228dc2c4001a22, []int{15} +} +func (m *SuccessPolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SuccessPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuccessPolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuccessPolicyRule.Merge(m, src) +} +func (m *SuccessPolicyRule) XXX_Size() int { + return m.Size() +} +func (m *SuccessPolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_SuccessPolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_SuccessPolicyRule proto.InternalMessageInfo + func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} } func (*UncountedTerminatedPods) ProtoMessage() {} func (*UncountedTerminatedPods) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{10} + return fileDescriptor_79228dc2c4001a22, []int{16} } func (m *UncountedTerminatedPods) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -367,104 +539,139 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1.JobTemplateSpec") + proto.RegisterType((*PodFailurePolicy)(nil), 
"k8s.io.api.batch.v1.PodFailurePolicy") + proto.RegisterType((*PodFailurePolicyOnExitCodesRequirement)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement") + proto.RegisterType((*PodFailurePolicyOnPodConditionsPattern)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern") + proto.RegisterType((*PodFailurePolicyRule)(nil), "k8s.io.api.batch.v1.PodFailurePolicyRule") + proto.RegisterType((*SuccessPolicy)(nil), "k8s.io.api.batch.v1.SuccessPolicy") + proto.RegisterType((*SuccessPolicyRule)(nil), "k8s.io.api.batch.v1.SuccessPolicyRule") proto.RegisterType((*UncountedTerminatedPods)(nil), "k8s.io.api.batch.v1.UncountedTerminatedPods") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) -} - -var fileDescriptor_3b52da57c93de713 = []byte{ - // 1413 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x41, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0x26, 0x71, 0x62, 0x8f, 0x93, 0xd4, 0x9d, 0xfe, 0xdb, 0xfa, 0x6f, 0x2a, 0x6f, 0x6a, - 0x0a, 0x0a, 0xa8, 0xac, 0x49, 0x88, 0x10, 0x42, 0x80, 0x94, 0x4d, 0x55, 0x68, 0x70, 0xd4, 0x30, - 0x76, 0x84, 0x04, 0x05, 0xb1, 0xde, 0x1d, 0x3b, 0xdb, 0xec, 0xee, 0x58, 0x3b, 0x63, 0x0b, 0xdf, - 0x90, 0xf8, 0x02, 0xf0, 0x25, 0x10, 0x27, 0x84, 0x04, 0x67, 0x8e, 0xa8, 0xc7, 0x1e, 0x7b, 0x5a, - 0xd1, 0xe5, 0x03, 0x70, 0x0f, 0x17, 0x34, 0xb3, 0xe3, 0xdd, 0xb5, 0xbd, 0x1b, 0xd2, 0x1e, 0x2a, - 0x6e, 0xd9, 0x37, 0xbf, 0xf7, 0x9b, 0xe7, 0xf7, 0x7e, 0xf3, 0xde, 0x0b, 0x78, 0xef, 0xf4, 0x1d, - 0xaa, 0xd9, 0xa4, 0x79, 0x3a, 0xec, 0x62, 0xdf, 0xc3, 0x0c, 0xd3, 0xe6, 0x08, 0x7b, 0x16, 0xf1, - 0x9b, 0xf2, 0xc0, 0x18, 0xd8, 0xcd, 0xae, 0xc1, 0xcc, 0x93, 0xe6, 0x68, 0xbb, 0xd9, 0xc7, 0x1e, - 0xf6, 0x0d, 0x86, 0x2d, 0x6d, 0xe0, 0x13, 0x46, 0xe0, 0x95, 0x08, 0xa4, 0x19, 0x03, 0x5b, 0x13, - 0x20, 0x6d, 0xb4, 0x5d, 0x7b, 0xa3, 0x6f, 0xb3, 0x93, 0x61, 0x57, 0x33, 0x89, 0xdb, 0xec, 0x93, - 0x3e, 0x69, 0x0a, 0x6c, 0x77, 0xd8, 0x13, 0x5f, 0xe2, 0x43, 0xfc, 0x15, 0x71, 0xd4, 0x1a, 0xa9, - 0x8b, 0x4c, 0xe2, 0xe3, 0x8c, 0x7b, 0x6a, 0xbb, 0x09, 0xc6, 0x35, 0xcc, 0x13, 0xdb, 0xc3, 0xfe, - 0xb8, 0x39, 0x38, 0xed, 0x73, 0x03, 0x6d, 0xba, 0x98, 0x19, 0x59, 0x5e, 0xcd, 0x3c, 0x2f, 0x7f, - 0xe8, 0x31, 0xdb, 0xc5, 0x73, 0x0e, 0x6f, 0xff, 0x9b, 0x03, 0x35, 0x4f, 0xb0, 0x6b, 0xcc, 0xfa, - 0x35, 0xfe, 0x56, 0xc0, 0xea, 0xbe, 0x4f, 0xbc, 0x03, 0xd2, 0x85, 0x5f, 0x81, 0x22, 0x8f, 0xc7, - 0x32, 0x98, 0x51, 0x55, 0x36, 0x95, 0xad, 0xf2, 0xce, 0x9b, 0x5a, 0x92, 0xa5, 0x98, 0x56, 0x1b, - 0x9c, 0xf6, 0xb9, 0x81, 0x6a, 0x1c, 0xad, 0x8d, 0xb6, 0xb5, 0xfb, 0xdd, 0x87, 0xd8, 0x64, 0x87, - 0x98, 0x19, 0x3a, 0x7c, 0x14, 0xa8, 0x0b, 0x61, 0xa0, 0x82, 0xc4, 0x86, 0x62, 0x56, 0xa8, 0x83, - 0x65, 0x3a, 0xc0, 0x66, 0x75, 0x51, 0xb0, 0x6f, 0x6a, 0x19, 0x35, 0xd0, 0x64, 0x34, 0xed, 0x01, - 0x36, 0xf5, 0x35, 0xc9, 0xb6, 0xcc, 0xbf, 0x90, 0xf0, 0x85, 0x07, 0x60, 0x85, 0x32, 0x83, 0x0d, - 0x69, 0x75, 0x49, 0xb0, 0x34, 0xce, 0x65, 0x11, 0x48, 0x7d, 0x43, 0xf2, 0xac, 0x44, 0xdf, 0x48, - 0x32, 0x34, 0x7e, 0x52, 0x40, 0x59, 0x22, 0x5b, 0x36, 0x65, 0xf0, 0xc1, 0x5c, 0x06, 0xb4, 0x8b, - 0x65, 0x80, 0x7b, 0x8b, 0xdf, 0x5f, 0x91, 0x37, 0x15, 0x27, 0x96, 0xd4, 0xaf, 0xdf, 0x03, 0x05, - 0x9b, 0x61, 0x97, 0x56, 0x17, 0x37, 0x97, 0xb6, 0xca, 0x3b, 0x37, 0xce, 0x0b, 0x5c, 0x5f, 0x97, - 0x44, 0x85, 0x7b, 0xdc, 0x05, 0x45, 0x9e, 0x8d, 0x1f, 0x97, 0xe3, 0x80, 0x79, 0x4a, 0xe0, 0x6d, - 0x50, 0xe4, 0x85, 0xb5, 0x86, 0x0e, 0x16, 0x01, 0x97, 0x92, 0x00, 0xda, 0xd2, 0x8e, 0x62, 0x04, - 0x3c, 
0x06, 0xd7, 0x29, 0x33, 0x7c, 0x66, 0x7b, 0xfd, 0x3b, 0xd8, 0xb0, 0x1c, 0xdb, 0xc3, 0x6d, - 0x6c, 0x12, 0xcf, 0xa2, 0xa2, 0x22, 0x4b, 0xfa, 0x4b, 0x61, 0xa0, 0x5e, 0x6f, 0x67, 0x43, 0x50, - 0x9e, 0x2f, 0x7c, 0x00, 0x2e, 0x9b, 0xc4, 0x33, 0x87, 0xbe, 0x8f, 0x3d, 0x73, 0x7c, 0x44, 0x1c, - 0xdb, 0x1c, 0x8b, 0xe2, 0x94, 0x74, 0x4d, 0x46, 0x73, 0x79, 0x7f, 0x16, 0x70, 0x96, 0x65, 0x44, - 0xf3, 0x44, 0xf0, 0x15, 0xb0, 0x4a, 0x87, 0x74, 0x80, 0x3d, 0xab, 0xba, 0xbc, 0xa9, 0x6c, 0x15, - 0xf5, 0x72, 0x18, 0xa8, 0xab, 0xed, 0xc8, 0x84, 0x26, 0x67, 0xf0, 0x73, 0x50, 0x7e, 0x48, 0xba, - 0x1d, 0xec, 0x0e, 0x1c, 0x83, 0xe1, 0x6a, 0x41, 0x54, 0xef, 0x56, 0x66, 0x8a, 0x0f, 0x12, 0x9c, - 0x50, 0xd9, 0x15, 0x19, 0x64, 0x39, 0x75, 0x80, 0xd2, 0x6c, 0xf0, 0x4b, 0x50, 0xa3, 0x43, 0xd3, - 0xc4, 0x94, 0xf6, 0x86, 0xce, 0x01, 0xe9, 0xd2, 0x8f, 0x6c, 0xca, 0x88, 0x3f, 0x6e, 0xd9, 0xae, - 0xcd, 0xaa, 0x2b, 0x9b, 0xca, 0x56, 0x41, 0xaf, 0x87, 0x81, 0x5a, 0x6b, 0xe7, 0xa2, 0xd0, 0x39, - 0x0c, 0x10, 0x81, 0x6b, 0x3d, 0xc3, 0x76, 0xb0, 0x35, 0xc7, 0xbd, 0x2a, 0xb8, 0x6b, 0x61, 0xa0, - 0x5e, 0xbb, 0x9b, 0x89, 0x40, 0x39, 0x9e, 0x8d, 0xdf, 0x16, 0xc1, 0xfa, 0xd4, 0x2b, 0x80, 0x1f, - 0x83, 0x15, 0xc3, 0x64, 0xf6, 0x88, 0x4b, 0x85, 0x0b, 0xf0, 0xe5, 0x74, 0x76, 0x78, 0xff, 0x4a, - 0xde, 0x32, 0xc2, 0x3d, 0xcc, 0x8b, 0x80, 0x93, 0xa7, 0xb3, 0x27, 0x5c, 0x91, 0xa4, 0x80, 0x0e, - 0xa8, 0x38, 0x06, 0x65, 0x13, 0x95, 0x75, 0x6c, 0x17, 0x8b, 0xfa, 0x94, 0x77, 0x5e, 0xbf, 0xd8, - 0x93, 0xe1, 0x1e, 0xfa, 0xff, 0xc2, 0x40, 0xad, 0xb4, 0x66, 0x78, 0xd0, 0x1c, 0x33, 0xf4, 0x01, - 0x14, 0xb6, 0x38, 0x85, 0xe2, 0xbe, 0xc2, 0x33, 0xdf, 0x77, 0x2d, 0x0c, 0x54, 0xd8, 0x9a, 0x63, - 0x42, 0x19, 0xec, 0x8d, 0xbf, 0x14, 0xb0, 0xf4, 0x62, 0xda, 0xe2, 0x07, 0x53, 0x6d, 0xf1, 0x46, - 0x9e, 0x68, 0x73, 0x5b, 0xe2, 0xdd, 0x99, 0x96, 0x58, 0xcf, 0x65, 0x38, 0xbf, 0x1d, 0xfe, 0xbe, - 0x04, 0xd6, 0x0e, 0x48, 0x77, 0x9f, 0x78, 0x96, 0xcd, 0x6c, 0xe2, 0xc1, 0x5d, 0xb0, 0xcc, 0xc6, - 0x83, 0x49, 0x6b, 0xd9, 0x9c, 0x5c, 0xdd, 0x19, 0x0f, 0xf0, 0x59, 0xa0, 0x56, 0xd2, 0x58, 0x6e, - 0x43, 0x02, 0x0d, 0x5b, 0x71, 0x38, 0x8b, 0xc2, 0x6f, 0x77, 0xfa, 0xba, 0xb3, 0x40, 0xcd, 0x18, - 0x9c, 0x5a, 0xcc, 0x34, 0x1d, 0x14, 0xec, 0x83, 0x75, 0x5e, 0x9c, 0x23, 0x9f, 0x74, 0x23, 0x95, - 0x2d, 0x3d, 0x73, 0xd5, 0xaf, 0xca, 0x00, 0xd6, 0x5b, 0x69, 0x22, 0x34, 0xcd, 0x0b, 0x47, 0x91, - 0xc6, 0x3a, 0xbe, 0xe1, 0xd1, 0xe8, 0x27, 0x3d, 0x9f, 0xa6, 0x6b, 0xf2, 0x36, 0xa1, 0xb3, 0x69, - 0x36, 0x94, 0x71, 0x03, 0x7c, 0x15, 0xac, 0xf8, 0xd8, 0xa0, 0xc4, 0x13, 0x7a, 0x2e, 0x25, 0xd5, - 0x41, 0xc2, 0x8a, 0xe4, 0x29, 0x7c, 0x0d, 0xac, 0xba, 0x98, 0x52, 0xa3, 0x8f, 0x45, 0xc7, 0x29, - 0xe9, 0x97, 0x24, 0x70, 0xf5, 0x30, 0x32, 0xa3, 0xc9, 0x79, 0xe3, 0x07, 0x05, 0xac, 0xbe, 0x98, - 0x99, 0xf6, 0xfe, 0xf4, 0x4c, 0xab, 0xe6, 0x29, 0x2f, 0x67, 0x9e, 0xfd, 0x5c, 0x10, 0x81, 0x8a, - 0x59, 0xb6, 0x0d, 0xca, 0x03, 0xc3, 0x37, 0x1c, 0x07, 0x3b, 0x36, 0x75, 0x45, 0xac, 0x05, 0xfd, - 0x12, 0xef, 0xcb, 0x47, 0x89, 0x19, 0xa5, 0x31, 0xdc, 0xc5, 0x24, 0xee, 0xc0, 0xc1, 0x3c, 0x99, - 0x91, 0xdc, 0xa4, 0xcb, 0x7e, 0x62, 0x46, 0x69, 0x0c, 0xbc, 0x0f, 0xae, 0x46, 0x1d, 0x6c, 0x76, - 0x02, 0x2e, 0x89, 0x09, 0xf8, 0xff, 0x30, 0x50, 0xaf, 0xee, 0x65, 0x01, 0x50, 0xb6, 0x1f, 0xdc, - 0x05, 0x6b, 0x5d, 0xc3, 0x3c, 0x25, 0xbd, 0x5e, 0xba, 0x63, 0x57, 0xc2, 0x40, 0x5d, 0xd3, 0x53, - 0x76, 0x34, 0x85, 0x82, 0x5f, 0x80, 0x22, 0xc5, 0x0e, 0x36, 0x19, 0xf1, 0xa5, 0xc4, 0xde, 0xba, - 0x60, 0x55, 0x8c, 0x2e, 0x76, 0xda, 0xd2, 0x55, 0x5f, 0x13, 0x93, 0x5e, 0x7e, 0xa1, 0x98, 0x12, - 0xbe, 0x0b, 0x36, 0x5c, 0xc3, 
0x1b, 0x1a, 0x31, 0x52, 0x68, 0xab, 0xa8, 0xc3, 0x30, 0x50, 0x37, - 0x0e, 0xa7, 0x4e, 0xd0, 0x0c, 0x12, 0x7e, 0x02, 0x8a, 0x6c, 0x32, 0x46, 0x57, 0x44, 0x68, 0x99, - 0x83, 0xe2, 0x88, 0x58, 0x53, 0x53, 0x34, 0x56, 0x49, 0x3c, 0x42, 0x63, 0x1a, 0xbe, 0x78, 0x30, - 0xe6, 0xc8, 0x8c, 0xed, 0xf5, 0x18, 0xf6, 0xef, 0xda, 0x9e, 0x4d, 0x4f, 0xb0, 0x55, 0x2d, 0x8a, - 0x74, 0x89, 0xc5, 0xa3, 0xd3, 0x69, 0x65, 0x41, 0x50, 0x9e, 0x2f, 0x6c, 0x81, 0x8d, 0xa4, 0xb4, - 0x87, 0xc4, 0xc2, 0xd5, 0x92, 0x78, 0x18, 0xb7, 0xf8, 0xaf, 0xdc, 0x9f, 0x3a, 0x39, 0x9b, 0xb3, - 0xa0, 0x19, 0xdf, 0xf4, 0xa2, 0x01, 0xf2, 0x17, 0x8d, 0xc6, 0xf7, 0x05, 0x50, 0x4a, 0x66, 0xea, - 0x31, 0x00, 0xe6, 0xa4, 0x71, 0x51, 0x39, 0x57, 0x6f, 0xe6, 0x3d, 0x82, 0xb8, 0xc5, 0x25, 0xf3, - 0x20, 0x36, 0x51, 0x94, 0x22, 0x82, 0x9f, 0x82, 0x92, 0xd8, 0xb6, 0x44, 0x0b, 0x5a, 0x7c, 0xe6, - 0x16, 0xb4, 0x1e, 0x06, 0x6a, 0xa9, 0x3d, 0x21, 0x40, 0x09, 0x17, 0xec, 0xa5, 0x53, 0xf6, 0x9c, - 0xed, 0x14, 0x4e, 0xa7, 0x57, 0x5c, 0x31, 0xc3, 0xca, 0x9b, 0x9a, 0xdc, 0x35, 0x96, 0x45, 0x81, - 0xf3, 0xd6, 0x88, 0x26, 0x28, 0x89, 0xbd, 0x08, 0x5b, 0xd8, 0x12, 0x1a, 0x2d, 0xe8, 0x97, 0x25, - 0xb4, 0xd4, 0x9e, 0x1c, 0xa0, 0x04, 0xc3, 0x89, 0xa3, 0x85, 0x47, 0xae, 0x5d, 0x31, 0x71, 0xb4, - 0x1e, 0x21, 0x79, 0x0a, 0xef, 0x80, 0x8a, 0x0c, 0x09, 0x5b, 0xf7, 0x3c, 0x0b, 0x7f, 0x8d, 0xa9, - 0x78, 0x9a, 0x25, 0xbd, 0x2a, 0x3d, 0x2a, 0xfb, 0x33, 0xe7, 0x68, 0xce, 0x03, 0x7e, 0xab, 0x80, - 0xeb, 0x43, 0xcf, 0x24, 0x43, 0x8f, 0x61, 0xab, 0x83, 0x7d, 0xd7, 0xf6, 0xf8, 0x3f, 0x4f, 0x47, - 0xc4, 0xa2, 0x42, 0xb9, 0xe5, 0x9d, 0xdb, 0x99, 0xc5, 0x3e, 0xce, 0xf6, 0x89, 0x74, 0x9e, 0x73, - 0x88, 0xf2, 0x6e, 0x82, 0x2a, 0x28, 0xf8, 0xd8, 0xb0, 0xc6, 0x42, 0xde, 0x05, 0xbd, 0xc4, 0xdb, - 0x28, 0xe2, 0x06, 0x14, 0xd9, 0x1b, 0xbf, 0x28, 0xe0, 0xd2, 0xcc, 0x56, 0xfb, 0xdf, 0x5f, 0x5b, - 0x1a, 0xbf, 0x2a, 0x20, 0x2f, 0x17, 0xf0, 0x28, 0xad, 0x0b, 0xfe, 0xac, 0x4a, 0xfa, 0xce, 0x94, - 0x26, 0xce, 0x02, 0xf5, 0x66, 0xde, 0xff, 0xbc, 0x7c, 0x0b, 0xa1, 0xda, 0xf1, 0xbd, 0x3b, 0x69, - 0xe1, 0x7c, 0x18, 0x0b, 0x67, 0x51, 0xd0, 0x35, 0x13, 0xd1, 0x5c, 0x8c, 0x4b, 0xba, 0xeb, 0x5b, - 0x8f, 0x9e, 0xd6, 0x17, 0x1e, 0x3f, 0xad, 0x2f, 0x3c, 0x79, 0x5a, 0x5f, 0xf8, 0x26, 0xac, 0x2b, - 0x8f, 0xc2, 0xba, 0xf2, 0x38, 0xac, 0x2b, 0x4f, 0xc2, 0xba, 0xf2, 0x47, 0x58, 0x57, 0xbe, 0xfb, - 0xb3, 0xbe, 0xf0, 0xd9, 0xe2, 0x68, 0xfb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x54, 0x31, 0x16, - 0xcf, 0xa2, 0x10, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/batch/v1/generated.proto", fileDescriptor_79228dc2c4001a22) +} + +var fileDescriptor_79228dc2c4001a22 = []byte{ + // 1882 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x37, 0x6d, 0xcb, 0x96, 0x46, 0xfe, 0x90, 0x27, 0x4e, 0xa2, 0xba, 0x0b, 0xd1, 0xab, 0xec, + 0x06, 0xde, 0x76, 0x2b, 0x6d, 0xbc, 0x41, 0xb7, 0x1f, 0x68, 0xb1, 0xa1, 0xd2, 0x6c, 0xe3, 0x95, + 0x37, 0xea, 0xc8, 0x69, 0x81, 0xdd, 0xb4, 0xe8, 0x88, 0x1c, 0xc9, 0xdc, 0x50, 0x1c, 0x96, 0x1c, + 0x1a, 0xf1, 0xa5, 0x28, 0xd0, 0x7f, 0xa0, 0x3d, 0xf6, 0x1f, 0xe8, 0xb1, 0x97, 0xf6, 0xdc, 0xde, + 0x8a, 0x1c, 0x17, 0x3d, 0x2d, 0x7a, 0x20, 0x1a, 0xf6, 0x0f, 0xe8, 0xdd, 0x45, 0x81, 0x62, 0x86, + 0xc3, 0x4f, 0x91, 0x5e, 0x67, 0x81, 0x06, 0xbd, 0x89, 0xef, 0xfd, 0xde, 0x6f, 0x1e, 0xe7, 0x7d, + 0x52, 0xe0, 0xd6, 0xd3, 0x6f, 0x79, 0x3d, 0x93, 0xf6, 0xb1, 0x63, 0xf6, 0x27, 0x98, 0xe9, 0xa7, + 0xfd, 0xb3, 0x3b, 0xfd, 0x19, 0xb1, 0x89, 0x8b, 0x19, 0x31, 0x7a, 0x8e, 0x4b, 0x19, 0x85, 0xd7, + 0x22, 0x50, 0x0f, 0x3b, 
0x66, 0x4f, 0x80, 0x7a, 0x67, 0x77, 0xf6, 0xbe, 0x31, 0x33, 0xd9, 0xa9, + 0x3f, 0xe9, 0xe9, 0x74, 0xde, 0x9f, 0xd1, 0x19, 0xed, 0x0b, 0xec, 0xc4, 0x9f, 0x8a, 0x27, 0xf1, + 0x20, 0x7e, 0x45, 0x1c, 0x7b, 0xdd, 0xcc, 0x41, 0x3a, 0x75, 0x49, 0xc9, 0x39, 0x7b, 0x77, 0x53, + 0xcc, 0x1c, 0xeb, 0xa7, 0xa6, 0x4d, 0xdc, 0xf3, 0xbe, 0xf3, 0x74, 0xc6, 0x05, 0x5e, 0x7f, 0x4e, + 0x18, 0x2e, 0xb3, 0xea, 0x57, 0x59, 0xb9, 0xbe, 0xcd, 0xcc, 0x39, 0x59, 0x30, 0xf8, 0xe6, 0x17, + 0x19, 0x78, 0xfa, 0x29, 0x99, 0xe3, 0xa2, 0x5d, 0xf7, 0xdf, 0x0a, 0x58, 0x1f, 0xb8, 0xd4, 0x3e, + 0xa2, 0x13, 0xf8, 0x73, 0x50, 0xe7, 0xfe, 0x18, 0x98, 0xe1, 0xb6, 0xb2, 0xaf, 0x1c, 0x34, 0x0f, + 0xdf, 0xe9, 0xa5, 0xb7, 0x94, 0xd0, 0xf6, 0x9c, 0xa7, 0x33, 0x2e, 0xf0, 0x7a, 0x1c, 0xdd, 0x3b, + 0xbb, 0xd3, 0x7b, 0x34, 0xf9, 0x94, 0xe8, 0xec, 0x98, 0x30, 0xac, 0xc1, 0xe7, 0x81, 0xba, 0x14, + 0x06, 0x2a, 0x48, 0x65, 0x28, 0x61, 0x85, 0x1a, 0x58, 0xf5, 0x1c, 0xa2, 0xb7, 0x97, 0x05, 0xfb, + 0x7e, 0xaf, 0x24, 0x06, 0x3d, 0xe9, 0xcd, 0xd8, 0x21, 0xba, 0xb6, 0x21, 0xd9, 0x56, 0xf9, 0x13, + 0x12, 0xb6, 0xf0, 0x08, 0xac, 0x79, 0x0c, 0x33, 0xdf, 0x6b, 0xaf, 0x08, 0x96, 0xee, 0xa5, 0x2c, + 0x02, 0xa9, 0x6d, 0x49, 0x9e, 0xb5, 0xe8, 0x19, 0x49, 0x86, 0xee, 0x1f, 0x14, 0xd0, 0x94, 0xc8, + 0xa1, 0xe9, 0x31, 0xf8, 0x64, 0xe1, 0x06, 0x7a, 0x57, 0xbb, 0x01, 0x6e, 0x2d, 0xde, 0xbf, 0x25, + 0x4f, 0xaa, 0xc7, 0x92, 0xcc, 0xdb, 0xdf, 0x03, 0x35, 0x93, 0x91, 0xb9, 0xd7, 0x5e, 0xde, 0x5f, + 0x39, 0x68, 0x1e, 0xbe, 0x76, 0x99, 0xe3, 0xda, 0xa6, 0x24, 0xaa, 0x3d, 0xe4, 0x26, 0x28, 0xb2, + 0xec, 0xfe, 0x6d, 0x35, 0x71, 0x98, 0x5f, 0x09, 0x7c, 0x1b, 0xd4, 0x79, 0x60, 0x0d, 0xdf, 0x22, + 0xc2, 0xe1, 0x46, 0xea, 0xc0, 0x58, 0xca, 0x51, 0x82, 0x80, 0x07, 0xa0, 0xce, 0x73, 0xe1, 0x63, + 0x6a, 0x93, 0x76, 0x5d, 0xa0, 0x37, 0x38, 0xf2, 0x44, 0xca, 0x50, 0xa2, 0x85, 0x8f, 0xc1, 0x4d, + 0x8f, 0x61, 0x97, 0x99, 0xf6, 0xec, 0x3e, 0xc1, 0x86, 0x65, 0xda, 0x64, 0x4c, 0x74, 0x6a, 0x1b, + 0x9e, 0x88, 0xdd, 0x8a, 0xf6, 0xd5, 0x30, 0x50, 0x6f, 0x8e, 0xcb, 0x21, 0xa8, 0xca, 0x16, 0x3e, + 0x01, 0x3b, 0x3a, 0xb5, 0x75, 0xdf, 0x75, 0x89, 0xad, 0x9f, 0x8f, 0xa8, 0x65, 0xea, 0xe7, 0x22, + 0x8c, 0x0d, 0xad, 0x27, 0xfd, 0xde, 0x19, 0x14, 0x01, 0x17, 0x65, 0x42, 0xb4, 0x48, 0x04, 0xdf, + 0x04, 0xeb, 0x9e, 0xef, 0x39, 0xc4, 0x36, 0xda, 0xab, 0xfb, 0xca, 0x41, 0x5d, 0x6b, 0x86, 0x81, + 0xba, 0x3e, 0x8e, 0x44, 0x28, 0xd6, 0xc1, 0x4f, 0x40, 0xf3, 0x53, 0x3a, 0x39, 0x21, 0x73, 0xc7, + 0xc2, 0x8c, 0xb4, 0x6b, 0x22, 0xce, 0x6f, 0x94, 0x06, 0xe3, 0x28, 0xc5, 0x89, 0x7c, 0xbc, 0x26, + 0x9d, 0x6c, 0x66, 0x14, 0x28, 0xcb, 0x06, 0x7f, 0x06, 0xf6, 0x3c, 0x5f, 0xd7, 0x89, 0xe7, 0x4d, + 0x7d, 0xeb, 0x88, 0x4e, 0xbc, 0x1f, 0x9a, 0x1e, 0xa3, 0xee, 0xf9, 0xd0, 0x9c, 0x9b, 0xac, 0xbd, + 0xb6, 0xaf, 0x1c, 0xd4, 0xb4, 0x4e, 0x18, 0xa8, 0x7b, 0xe3, 0x4a, 0x14, 0xba, 0x84, 0x01, 0x22, + 0x70, 0x63, 0x8a, 0x4d, 0x8b, 0x18, 0x0b, 0xdc, 0xeb, 0x82, 0x7b, 0x2f, 0x0c, 0xd4, 0x1b, 0x0f, + 0x4a, 0x11, 0xa8, 0xc2, 0xb2, 0xfb, 0xe7, 0x65, 0xb0, 0x99, 0xab, 0x17, 0xf8, 0x21, 0x58, 0xc3, + 0x3a, 0x33, 0xcf, 0x78, 0x52, 0xf1, 0x54, 0xbd, 0x95, 0xbd, 0x1d, 0xde, 0xe9, 0xd2, 0xaa, 0x47, + 0x64, 0x4a, 0x78, 0x10, 0x48, 0x5a, 0x64, 0xf7, 0x84, 0x29, 0x92, 0x14, 0xd0, 0x02, 0x2d, 0x0b, + 0x7b, 0x2c, 0xce, 0x47, 0x9e, 0x6d, 0x22, 0x3e, 0xcd, 0xc3, 0xaf, 0x5d, 0xad, 0xb8, 0xb8, 0x85, + 0xb6, 0x1b, 0x06, 0x6a, 0x6b, 0x58, 0xe0, 0x41, 0x0b, 0xcc, 0xd0, 0x05, 0x50, 0xc8, 0x92, 0x2b, + 0x14, 0xe7, 0xd5, 0x5e, 0xfa, 0xbc, 0x1b, 0x61, 0xa0, 0xc2, 0xe1, 0x02, 0x13, 0x2a, 0x61, 0xef, + 0xfe, 0x4b, 0x01, 0x2b, 0xaf, 0xa6, 0x81, 0x7e, 
0x3f, 0xd7, 0x40, 0x5f, 0xab, 0x4a, 0xda, 0xca, + 0xe6, 0xf9, 0xa0, 0xd0, 0x3c, 0x3b, 0x95, 0x0c, 0x97, 0x37, 0xce, 0xbf, 0xae, 0x80, 0x8d, 0x23, + 0x3a, 0x19, 0x50, 0xdb, 0x30, 0x99, 0x49, 0x6d, 0x78, 0x17, 0xac, 0xb2, 0x73, 0x27, 0x6e, 0x42, + 0xfb, 0xf1, 0xd1, 0x27, 0xe7, 0x0e, 0xb9, 0x08, 0xd4, 0x56, 0x16, 0xcb, 0x65, 0x48, 0xa0, 0xe1, + 0x30, 0x71, 0x67, 0x59, 0xd8, 0xdd, 0xcd, 0x1f, 0x77, 0x11, 0xa8, 0x25, 0x23, 0xb6, 0x97, 0x30, + 0xe5, 0x9d, 0x82, 0x33, 0xb0, 0xc9, 0x83, 0x33, 0x72, 0xe9, 0x24, 0xca, 0xb2, 0x95, 0x97, 0x8e, + 0xfa, 0x75, 0xe9, 0xc0, 0xe6, 0x30, 0x4b, 0x84, 0xf2, 0xbc, 0xf0, 0x2c, 0xca, 0xb1, 0x13, 0x17, + 0xdb, 0x5e, 0xf4, 0x4a, 0x5f, 0x2e, 0xa7, 0xf7, 0xe4, 0x69, 0x22, 0xcf, 0xf2, 0x6c, 0xa8, 0xe4, + 0x04, 0x78, 0x1b, 0xac, 0xb9, 0x04, 0x7b, 0xd4, 0x16, 0xf9, 0xdc, 0x48, 0xa3, 0x83, 0x84, 0x14, + 0x49, 0x2d, 0x7c, 0x0b, 0xac, 0xcf, 0x89, 0xe7, 0xe1, 0x19, 0x11, 0x1d, 0xa7, 0xa1, 0x6d, 0x4b, + 0xe0, 0xfa, 0x71, 0x24, 0x46, 0xb1, 0xbe, 0xfb, 0x7b, 0x05, 0xac, 0xbf, 0x9a, 0xe9, 0xf7, 0xbd, + 0xfc, 0xf4, 0x6b, 0x57, 0x65, 0x5e, 0xc5, 0xe4, 0xfb, 0x5d, 0x43, 0x38, 0x2a, 0xa6, 0xde, 0x1d, + 0xd0, 0x74, 0xb0, 0x8b, 0x2d, 0x8b, 0x58, 0xa6, 0x37, 0x17, 0xbe, 0xd6, 0xb4, 0x6d, 0xde, 0x97, + 0x47, 0xa9, 0x18, 0x65, 0x31, 0xdc, 0x44, 0xa7, 0x73, 0xc7, 0x22, 0xfc, 0x32, 0xa3, 0x74, 0x93, + 0x26, 0x83, 0x54, 0x8c, 0xb2, 0x18, 0xf8, 0x08, 0x5c, 0x8f, 0x3a, 0x58, 0x71, 0x02, 0xae, 0x88, + 0x09, 0xf8, 0x95, 0x30, 0x50, 0xaf, 0xdf, 0x2b, 0x03, 0xa0, 0x72, 0x3b, 0x38, 0x03, 0x2d, 0x87, + 0x1a, 0xbc, 0x39, 0xfb, 0x2e, 0x91, 0xc3, 0xaf, 0x29, 0xee, 0xf9, 0xcd, 0xd2, 0xcb, 0x18, 0x15, + 0xc0, 0x51, 0x0f, 0x2c, 0x4a, 0xd1, 0x02, 0x29, 0xfc, 0x04, 0x6c, 0xca, 0x11, 0x22, 0x4f, 0x69, + 0x5d, 0xb2, 0x29, 0x8d, 0xb3, 0x48, 0x6d, 0x87, 0x27, 0x7f, 0x4e, 0x84, 0xf2, 0x5c, 0xf0, 0x2e, + 0xd8, 0x98, 0x60, 0xfd, 0x29, 0x9d, 0x4e, 0xb3, 0x73, 0xa7, 0x15, 0x06, 0xea, 0x86, 0x96, 0x91, + 0xa3, 0x1c, 0x0a, 0x0e, 0xc1, 0x6e, 0xf6, 0x79, 0x44, 0xdc, 0x87, 0xb6, 0x41, 0x9e, 0xb5, 0x37, + 0x84, 0x75, 0x3b, 0x0c, 0xd4, 0x5d, 0xad, 0x44, 0x8f, 0x4a, 0xad, 0xe0, 0xfb, 0xa0, 0x35, 0xc7, + 0xcf, 0xa2, 0x31, 0x27, 0x24, 0xc4, 0x6b, 0x6f, 0x0a, 0x26, 0x71, 0x45, 0xc7, 0x05, 0x1d, 0x5a, + 0x40, 0xc3, 0x9f, 0x82, 0xba, 0x47, 0x2c, 0xa2, 0x33, 0xea, 0xca, 0xc2, 0x7d, 0xf7, 0x8a, 0xb9, + 0x8e, 0x27, 0xc4, 0x1a, 0x4b, 0xd3, 0x68, 0x7f, 0x8a, 0x9f, 0x50, 0x42, 0x09, 0xbf, 0x03, 0xb6, + 0xe6, 0xd8, 0xf6, 0x71, 0x82, 0x14, 0x15, 0x5b, 0xd7, 0x60, 0x18, 0xa8, 0x5b, 0xc7, 0x39, 0x0d, + 0x2a, 0x20, 0xe1, 0x8f, 0x40, 0x9d, 0xc5, 0xcb, 0xc9, 0x9a, 0x70, 0xad, 0x74, 0xfc, 0x8e, 0xa8, + 0x91, 0xdb, 0x4d, 0x92, 0xda, 0x4b, 0x16, 0x93, 0x84, 0x86, 0xaf, 0x73, 0x8c, 0x59, 0x32, 0x0f, + 0xef, 0x4d, 0x19, 0x71, 0x1f, 0x98, 0xb6, 0xe9, 0x9d, 0x12, 0x43, 0xec, 0x81, 0xb5, 0x68, 0x9d, + 0x3b, 0x39, 0x19, 0x96, 0x41, 0x50, 0x95, 0x2d, 0x1c, 0x82, 0xad, 0xb4, 0x60, 0x8e, 0xa9, 0x41, + 0xda, 0x0d, 0xd1, 0x6e, 0xde, 0xe0, 0x6f, 0x39, 0xc8, 0x69, 0x2e, 0x16, 0x24, 0xa8, 0x60, 0x9b, + 0x5d, 0xdf, 0xc0, 0x25, 0xeb, 0x9b, 0x01, 0x76, 0x1d, 0x6a, 0x20, 0xe2, 0x58, 0x58, 0x27, 0x73, + 0x62, 0x33, 0x99, 0xe3, 0x5b, 0xe2, 0xe8, 0x77, 0x78, 0x26, 0x8d, 0x4a, 0xf4, 0x17, 0x15, 0x72, + 0x54, 0xca, 0x06, 0xbf, 0x0e, 0x1a, 0x73, 0x6c, 0xe3, 0x19, 0x31, 0xb4, 0xf3, 0xf6, 0xb6, 0xa0, + 0xde, 0x0c, 0x03, 0xb5, 0x71, 0x1c, 0x0b, 0x51, 0xaa, 0xef, 0xfe, 0xa7, 0x06, 0x1a, 0xe9, 0xf2, + 0xf4, 0x18, 0x00, 0x3d, 0x9e, 0x50, 0x9e, 0x5c, 0xa0, 0x5e, 0xaf, 0xea, 0x76, 0xc9, 0x2c, 0x4b, + 0x07, 0x7f, 0x22, 0xf2, 0x50, 0x86, 0x08, 0xfe, 0x04, 0x34, 0xc4, 0x5a, 
0x2d, 0x66, 0xcd, 0xf2, + 0x4b, 0xcf, 0x1a, 0xe1, 0xfd, 0x38, 0x26, 0x40, 0x29, 0x17, 0x9c, 0x66, 0xa3, 0xf8, 0x25, 0xe7, + 0x26, 0xcc, 0x47, 0x5c, 0x1c, 0x51, 0x60, 0xe5, 0xd3, 0x4b, 0x2e, 0x95, 0xab, 0x22, 0xe7, 0xaa, + 0xf6, 0xc5, 0x3e, 0x68, 0x88, 0x8e, 0x43, 0x0c, 0x62, 0x88, 0xb2, 0xa9, 0x69, 0x3b, 0x12, 0xda, + 0x18, 0xc7, 0x0a, 0x94, 0x62, 0x38, 0x71, 0xb4, 0xd9, 0xca, 0xfd, 0x3a, 0x21, 0x8e, 0x4a, 0x1e, + 0x49, 0x2d, 0x9f, 0x01, 0x8c, 0xb8, 0x73, 0xd3, 0xc6, 0xfc, 0xdb, 0x44, 0xb4, 0x5e, 0x39, 0x03, + 0x4e, 0x52, 0x31, 0xca, 0x62, 0xe0, 0x7d, 0xd0, 0x92, 0x6f, 0x91, 0x36, 0x9a, 0x75, 0x91, 0x0d, + 0x6d, 0x79, 0x48, 0x6b, 0x50, 0xd0, 0xa3, 0x05, 0x0b, 0xf8, 0x1e, 0xd8, 0x9c, 0xe6, 0x7a, 0x15, + 0x10, 0x14, 0xa2, 0xd7, 0xe6, 0x1b, 0x55, 0x1e, 0x07, 0x7f, 0xad, 0x80, 0x9b, 0xbe, 0xad, 0x53, + 0xdf, 0x66, 0xc4, 0x88, 0x9d, 0x24, 0xc6, 0x88, 0x1a, 0x9e, 0x28, 0xdc, 0xe6, 0xe1, 0xdb, 0xa5, + 0x89, 0xf5, 0xb8, 0xdc, 0x26, 0x2a, 0xf3, 0x0a, 0x25, 0xaa, 0x3a, 0x09, 0xaa, 0xa0, 0xe6, 0x12, + 0x6c, 0x9c, 0x8b, 0xea, 0xae, 0x69, 0x0d, 0x3e, 0x9b, 0x11, 0x17, 0xa0, 0x48, 0xde, 0xfd, 0xa3, + 0x02, 0xb6, 0x0b, 0x9f, 0x4a, 0xff, 0xff, 0xbb, 0x70, 0x77, 0x02, 0x16, 0x66, 0x29, 0xfc, 0x08, + 0xd4, 0x5c, 0xdf, 0x22, 0x71, 0xd9, 0xbe, 0x75, 0xa5, 0xb9, 0x8c, 0x7c, 0x8b, 0xa4, 0x5b, 0x0b, + 0x7f, 0xf2, 0x50, 0x44, 0xd3, 0xfd, 0xbb, 0x02, 0x6e, 0x17, 0xe1, 0x8f, 0xec, 0x1f, 0x3c, 0x33, + 0xd9, 0x80, 0x1a, 0xc4, 0x43, 0xe4, 0x17, 0xbe, 0xe9, 0x8a, 0xbe, 0xc3, 0x93, 0x44, 0xa7, 0x36, + 0xc3, 0xfc, 0x5a, 0x3e, 0xc2, 0xf3, 0x78, 0x95, 0x16, 0x49, 0x32, 0xc8, 0x2a, 0x50, 0x1e, 0x07, + 0xc7, 0xa0, 0x4e, 0x1d, 0xe2, 0x62, 0x3e, 0x65, 0xa2, 0x35, 0xfa, 0xbd, 0x78, 0x14, 0x3c, 0x92, + 0xf2, 0x8b, 0x40, 0xbd, 0x75, 0x89, 0x1b, 0x31, 0x0c, 0x25, 0x44, 0xb0, 0x0b, 0xd6, 0xce, 0xb0, + 0xe5, 0x13, 0xbe, 0xed, 0xac, 0x1c, 0xd4, 0x34, 0xc0, 0xeb, 0xe9, 0xc7, 0x42, 0x82, 0xa4, 0xa6, + 0xfb, 0x97, 0xd2, 0x97, 0x1b, 0x51, 0x23, 0xed, 0x60, 0x23, 0xcc, 0x18, 0x71, 0x6d, 0xf8, 0x41, + 0xee, 0xf3, 0xe0, 0xdd, 0xc2, 0xe7, 0xc1, 0xad, 0x92, 0x25, 0x3f, 0x4b, 0xf3, 0xbf, 0xfa, 0x62, + 0xe8, 0x3e, 0x5f, 0x06, 0xbb, 0x65, 0xd1, 0x84, 0xef, 0x47, 0xbd, 0x8a, 0xda, 0xd2, 0xe3, 0x83, + 0x6c, 0xaf, 0xa2, 0xf6, 0x45, 0xa0, 0xde, 0x28, 0xda, 0x45, 0x1a, 0x24, 0xed, 0xa0, 0x0d, 0x9a, + 0x34, 0xbd, 0x61, 0x99, 0xa4, 0xdf, 0xbd, 0x52, 0x3e, 0x95, 0x27, 0x48, 0xd4, 0xa9, 0xb2, 0xba, + 0xec, 0x01, 0xf0, 0x97, 0x60, 0x9b, 0xe6, 0xef, 0x5e, 0x44, 0xee, 0xea, 0x67, 0x96, 0xc5, 0x4d, + 0xbb, 0x29, 0xdf, 0x7b, 0xbb, 0xa0, 0x47, 0xc5, 0xc3, 0xba, 0x4f, 0x40, 0x7e, 0x6d, 0x84, 0x1f, + 0xe6, 0x4b, 0xe9, 0xf6, 0x17, 0x2f, 0x9f, 0x97, 0xd4, 0xd1, 0x6f, 0x15, 0xb0, 0xb3, 0x80, 0xe5, + 0x6b, 0x60, 0x32, 0x05, 0xe2, 0xd6, 0x1a, 0xc5, 0x4b, 0xac, 0x81, 0xe3, 0x82, 0x0e, 0x2d, 0xa0, + 0xf9, 0x9e, 0x96, 0xc8, 0x06, 0xbc, 0xf9, 0xc9, 0x2f, 0x03, 0x31, 0xcf, 0xc6, 0x39, 0x0d, 0x2a, + 0x20, 0xbb, 0x7f, 0x52, 0x40, 0x55, 0x2f, 0x85, 0xa3, 0xec, 0x0c, 0xe3, 0x17, 0xd0, 0xd0, 0x0e, + 0x73, 0xf3, 0xeb, 0x22, 0x50, 0x5f, 0xaf, 0xfa, 0xcb, 0x96, 0x27, 0xba, 0xd7, 0x7b, 0xfc, 0xf0, + 0x7e, 0x76, 0xc8, 0x7d, 0x90, 0x0c, 0xb9, 0x65, 0x41, 0xd7, 0x4f, 0x07, 0xdc, 0xd5, 0xb8, 0xa4, + 0xb9, 0xf6, 0xed, 0xe7, 0x2f, 0x3a, 0x4b, 0x9f, 0xbd, 0xe8, 0x2c, 0x7d, 0xfe, 0xa2, 0xb3, 0xf4, + 0xab, 0xb0, 0xa3, 0x3c, 0x0f, 0x3b, 0xca, 0x67, 0x61, 0x47, 0xf9, 0x3c, 0xec, 0x28, 0xff, 0x08, + 0x3b, 0xca, 0x6f, 0xfe, 0xd9, 0x59, 0xfa, 0xf8, 0x5a, 0xc9, 0x7f, 0xe8, 0xff, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0x1e, 0x70, 0x68, 0xe1, 0x59, 0x17, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA 
[]byte, err error) { @@ -587,6 +794,13 @@ func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TimeZone != nil { + i -= len(*m.TimeZone) + copy(dAtA[i:], *m.TimeZone) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TimeZone))) + i-- + dAtA[i] = 0x42 + } if m.FailedJobsHistoryLimit != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) i-- @@ -879,6 +1093,56 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SuccessPolicy != nil { + { + size, err := m.SuccessPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.ManagedBy != nil { + i -= len(*m.ManagedBy) + copy(dAtA[i:], *m.ManagedBy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ManagedBy))) + i-- + dAtA[i] = 0x7a + } + if m.PodReplacementPolicy != nil { + i -= len(*m.PodReplacementPolicy) + copy(dAtA[i:], *m.PodReplacementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PodReplacementPolicy))) + i-- + dAtA[i] = 0x72 + } + if m.MaxFailedIndexes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxFailedIndexes)) + i-- + dAtA[i] = 0x68 + } + if m.BackoffLimitPerIndex != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimitPerIndex)) + i-- + dAtA[i] = 0x60 + } + if m.PodFailurePolicy != nil { + { + size, err := m.PodFailurePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.Suspend != nil { i-- if *m.Suspend { @@ -976,6 +1240,18 @@ func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Terminating != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Terminating)) + i-- + dAtA[i] = 0x58 + } + if m.FailedIndexes != nil { + i -= len(*m.FailedIndexes) + copy(dAtA[i:], *m.FailedIndexes) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailedIndexes))) + i-- + dAtA[i] = 0x52 + } if m.Ready != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Ready)) i-- @@ -1091,7 +1367,7 @@ func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) { +func (m *PodFailurePolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1101,30 +1377,26 @@ func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UncountedTerminatedPods) MarshalTo(dAtA []byte) (int, error) { +func (m *PodFailurePolicy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *UncountedTerminatedPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodFailurePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Failed) > 0 { - for iNdEx := len(m.Failed) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Failed[iNdEx]) - copy(dAtA[i:], m.Failed[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Failed[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Succeeded) > 0 { - for iNdEx := len(m.Succeeded) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Succeeded[iNdEx]) - copy(dAtA[i:], m.Succeeded[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Succeeded[iNdEx]))) + if len(m.Rules) > 0 { 
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } @@ -1132,90 +1404,336 @@ func (m *UncountedTerminatedPods) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *PodFailurePolicyOnExitCodesRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *CronJob) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + +func (m *PodFailurePolicyOnExitCodesRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CronJobList) Size() (n int) { - if m == nil { - return 0 - } +func (m *PodFailurePolicyOnExitCodesRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Values[iNdEx])) + i-- + dAtA[i] = 0x18 } } - return n + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + if m.ContainerName != nil { + i -= len(*m.ContainerName) + copy(dAtA[i:], *m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ContainerName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *CronJobSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Schedule) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartingDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) - } - l = len(m.ConcurrencyPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.Suspend != nil { - n += 2 - } - l = m.JobTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.SuccessfulJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) - } - if m.FailedJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) +func (m *PodFailurePolicyOnPodConditionsPattern) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *CronJobStatus) Size() (n int) { - if m == nil { - return 0 - } +func (m *PodFailurePolicyOnPodConditionsPattern) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodFailurePolicyOnPodConditionsPattern) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Active) > 0 { - for _, e := range m.Active { - l = e.Size() - n += 1 
+ l + sovGenerated(uint64(l)) - } - } - if m.LastScheduleTime != nil { - l = m.LastScheduleTime.Size() + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodFailurePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodFailurePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodFailurePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OnPodConditions) > 0 { + for iNdEx := len(m.OnPodConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OnPodConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.OnExitCodes != nil { + { + size, err := m.OnExitCodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SuccessPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuccessPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SuccessPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SuccessPolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuccessPolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SuccessPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SucceededCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SucceededCount)) + i-- + dAtA[i] = 0x10 + } + if m.SucceededIndexes != nil { + i -= len(*m.SucceededIndexes) + copy(dAtA[i:], *m.SucceededIndexes) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SucceededIndexes))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UncountedTerminatedPods) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UncountedTerminatedPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Failed) > 0 { + for iNdEx := len(m.Failed) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Failed[iNdEx]) + copy(dAtA[i:], m.Failed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Failed[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Succeeded) > 0 { + for iNdEx := len(m.Succeeded) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Succeeded[iNdEx]) + copy(dAtA[i:], m.Succeeded[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Succeeded[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Suspend != nil { + n += 2 + } + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) + } + if m.TimeZone != nil { + l = len(*m.TimeZone) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() n += 1 + l + sovGenerated(uint64(l)) } if m.LastSuccessfulTime != nil { @@ -1315,6 +1833,28 @@ func (m *JobSpec) Size() (n int) { if m.Suspend != nil { n += 2 } + if m.PodFailurePolicy != nil { + l = m.PodFailurePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BackoffLimitPerIndex != nil { + n += 1 + sovGenerated(uint64(*m.BackoffLimitPerIndex)) + } + if m.MaxFailedIndexes != nil { + n += 1 + sovGenerated(uint64(*m.MaxFailedIndexes)) + } + if m.PodReplacementPolicy != nil { + l = len(*m.PodReplacementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ManagedBy != nil { + l = len(*m.ManagedBy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SuccessPolicy != nil { + l = m.SuccessPolicy.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -1350,6 +1890,13 @@ func (m *JobStatus) Size() (n int) { if m.Ready != nil { n += 1 + sovGenerated(uint64(*m.Ready)) } + if m.FailedIndexes != nil { + l = len(*m.FailedIndexes) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Terminating 
!= nil { + n += 1 + sovGenerated(uint64(*m.Terminating)) + } return n } @@ -1366,19 +1913,119 @@ func (m *JobTemplateSpec) Size() (n int) { return n } -func (m *UncountedTerminatedPods) Size() (n int) { +func (m *PodFailurePolicy) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Succeeded) > 0 { - for _, s := range m.Succeeded { - l = len(s) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Failed) > 0 { + return n +} + +func (m *PodFailurePolicyOnExitCodesRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ContainerName != nil { + l = len(*m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, e := range m.Values { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodFailurePolicyOnPodConditionsPattern) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodFailurePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.OnExitCodes != nil { + l = m.OnExitCodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.OnPodConditions) > 0 { + for _, e := range m.OnPodConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SuccessPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SuccessPolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SucceededIndexes != nil { + l = len(*m.SucceededIndexes) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SucceededCount != nil { + n += 1 + sovGenerated(uint64(*m.SucceededCount)) + } + return n +} + +func (m *UncountedTerminatedPods) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Succeeded) > 0 { + for _, s := range m.Succeeded { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Failed) > 0 { for _, s := range m.Failed { l = len(s) n += 1 + l + sovGenerated(uint64(l)) @@ -1433,6 +2080,7 @@ func (this *CronJobSpec) String() string { `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `TimeZone:` + valueToStringGenerated(this.TimeZone) + `,`, `}`, }, "") return s @@ -1512,6 +2160,12 @@ func (this *JobSpec) String() string { `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, `CompletionMode:` + valueToStringGenerated(this.CompletionMode) + `,`, `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `PodFailurePolicy:` + strings.Replace(this.PodFailurePolicy.String(), "PodFailurePolicy", "PodFailurePolicy", 1) + `,`, + `BackoffLimitPerIndex:` + valueToStringGenerated(this.BackoffLimitPerIndex) + `,`, + `MaxFailedIndexes:` + valueToStringGenerated(this.MaxFailedIndexes) + `,`, + `PodReplacementPolicy:` + valueToStringGenerated(this.PodReplacementPolicy) + `,`, + 
`ManagedBy:` + valueToStringGenerated(this.ManagedBy) + `,`, + `SuccessPolicy:` + strings.Replace(this.SuccessPolicy.String(), "SuccessPolicy", "SuccessPolicy", 1) + `,`, `}`, }, "") return s @@ -1535,6 +2189,8 @@ func (this *JobStatus) String() string { `CompletedIndexes:` + fmt.Sprintf("%v", this.CompletedIndexes) + `,`, `UncountedTerminatedPods:` + strings.Replace(this.UncountedTerminatedPods.String(), "UncountedTerminatedPods", "UncountedTerminatedPods", 1) + `,`, `Ready:` + valueToStringGenerated(this.Ready) + `,`, + `FailedIndexes:` + valueToStringGenerated(this.FailedIndexes) + `,`, + `Terminating:` + valueToStringGenerated(this.Terminating) + `,`, `}`, }, "") return s @@ -1550,6 +2206,87 @@ func (this *JobTemplateSpec) String() string { }, "") return s } +func (this *PodFailurePolicy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PodFailurePolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PodFailurePolicyRule", "PodFailurePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&PodFailurePolicy{`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *PodFailurePolicyOnExitCodesRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodFailurePolicyOnExitCodesRequirement{`, + `ContainerName:` + valueToStringGenerated(this.ContainerName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *PodFailurePolicyOnPodConditionsPattern) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodFailurePolicyOnPodConditionsPattern{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} +func (this *PodFailurePolicyRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForOnPodConditions := "[]PodFailurePolicyOnPodConditionsPattern{" + for _, f := range this.OnPodConditions { + repeatedStringForOnPodConditions += strings.Replace(strings.Replace(f.String(), "PodFailurePolicyOnPodConditionsPattern", "PodFailurePolicyOnPodConditionsPattern", 1), `&`, ``, 1) + "," + } + repeatedStringForOnPodConditions += "}" + s := strings.Join([]string{`&PodFailurePolicyRule{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `OnExitCodes:` + strings.Replace(this.OnExitCodes.String(), "PodFailurePolicyOnExitCodesRequirement", "PodFailurePolicyOnExitCodesRequirement", 1) + `,`, + `OnPodConditions:` + repeatedStringForOnPodConditions + `,`, + `}`, + }, "") + return s +} +func (this *SuccessPolicy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]SuccessPolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "SuccessPolicyRule", "SuccessPolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&SuccessPolicy{`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *SuccessPolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SuccessPolicyRule{`, + `SucceededIndexes:` + valueToStringGenerated(this.SucceededIndexes) + `,`, + `SucceededCount:` + valueToStringGenerated(this.SucceededCount) + `,`, + `}`, + }, "") + return s +} func (this 
*UncountedTerminatedPods) String() string { if this == nil { return "nil" @@ -1633,9 +2370,938 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CronJob{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedJobsHistoryLimit = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TimeZone = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, v11.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &v1.Time{} + } + if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastSuccessfulTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastSuccessfulTime == nil { + m.LastSuccessfulTime = &v1.Time{} + } + if err := m.LastSuccessfulTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Job) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1645,30 +3311,29 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1678,24 +3343,23 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - 
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1718,7 +3382,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } return nil } -func (m *CronJobList) Unmarshal(dAtA []byte) error { +func (m *JobList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1741,10 +3405,10 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") + return fmt.Errorf("proto: JobList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1809,7 +3473,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, CronJob{}) + m.Items = append(m.Items, Job{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1835,7 +3499,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } return nil } -func (m *CronJobSpec) Unmarshal(dAtA []byte) error { +func (m *JobSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1858,17 +3522,17 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1878,27 +3542,35 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Schedule = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.Completions = &v + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) } var v int64 for shift := uint(0); ; shift += 7 { @@ -1915,12 +3587,12 @@ func (m *CronJobSpec) Unmarshal(dAtA 
[]byte) error { break } } - m.StartingDeadlineSeconds = &v - case 3: + m.ActiveDeadlineSeconds = &v + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1930,27 +3602,31 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -1968,10 +3644,10 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } } b := bool(v != 0) - m.Suspend = &b - case 5: + m.ManualSelector = &b + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1998,13 +3674,13 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -2021,10 +3697,10 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { break } } - m.SuccessfulJobsHistoryLimit = &v - case 7: + m.BackoffLimit = &v + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TTLSecondsAfterFinished", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -2041,62 +3717,142 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { break } } - m.FailedJobsHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + m.TTLSecondsAfterFinished = &v + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionMode", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - 
if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + s := CompletionMode(dAtA[iNdEx:postIndex]) + m.CompletionMode = &s + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFailurePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFailurePolicy == nil { + m.PodFailurePolicy = &PodFailurePolicy{} + } + if err := m.PodFailurePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimitPerIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.BackoffLimitPerIndex = &v + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxFailedIndexes", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.MaxFailedIndexes = &v + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodReplacementPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2106,31 +3862,30 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 
{ + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, v11.ObjectReference{}) - if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := PodReplacementPolicy(dAtA[iNdEx:postIndex]) + m.PodReplacementPolicy = &s iNdEx = postIndex - case 4: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ManagedBy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2140,31 +3895,28 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastScheduleTime == nil { - m.LastScheduleTime = &v1.Time{} - } - if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.ManagedBy = &s iNdEx = postIndex - case 5: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastSuccessfulTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SuccessPolicy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2191,10 +3943,10 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastSuccessfulTime == nil { - m.LastSuccessfulTime = &v1.Time{} + if m.SuccessPolicy == nil { + m.SuccessPolicy = &SuccessPolicy{} } - if err := m.LastSuccessfulTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SuccessPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2219,7 +3971,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *Job) Unmarshal(dAtA []byte) error { +func (m *JobStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2242,15 +3994,15 @@ func (m *Job) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Job: wiretype end group for non-group") + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2277,13 +4029,14 @@ func (m *Job) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, JobCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2310,13 +4063,16 @@ func (m *Job) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.StartTime == nil { + m.StartTime = &v1.Time{} + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2343,65 +4099,18 @@ func (m *Job) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.CompletionTime == nil { + m.CompletionTime = &v1.Time{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) } - var stringLen uint64 + m.Active = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2411,29 +4120,16 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Active |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = JobConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) } - var stringLen uint64 + m.Succeeded = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2443,29 
+4139,35 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Succeeded |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CompletedIndexes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2475,28 +4177,27 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CompletedIndexes = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UncountedTerminatedPods", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2523,15 +4224,18 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.UncountedTerminatedPods == nil { + m.UncountedTerminatedPods = &UncountedTerminatedPods{} + } + if err := m.UncountedTerminatedPods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2541,27 +4245,15 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: + m.Ready = &v + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field FailedIndexes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2589,8 +4281,29 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.FailedIndexes = &s iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminating = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2612,7 +4325,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobList) Unmarshal(dAtA []byte) error { +func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2635,15 +4348,15 @@ func (m *JobList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobList: wiretype end group for non-group") + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2670,13 +4383,13 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2703,8 +4416,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Job{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2729,7 +4441,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobSpec) Unmarshal(dAtA []byte) error { +func (m *PodFailurePolicy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2752,75 +4464,15 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + return fmt.Errorf("proto: PodFailurePolicy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodFailurePolicy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Parallelism", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Parallelism = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Completions = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2847,39 +4499,66 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, PodFailurePolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - b := bool(v != 0) - m.ManualSelector = &b - case 6: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodFailurePolicyOnExitCodesRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodFailurePolicyOnExitCodesRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodFailurePolicyOnExitCodesRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if 
shift >= 64 { return ErrIntOverflowGenerated @@ -2889,30 +4568,30 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.ContainerName = &s iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2922,35 +4601,153 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.BackoffLimit = &v - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTLSecondsAfterFinished", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = PodFailurePolicyOnExitCodesOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Values) == 0 { + m.Values = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodFailurePolicyOnPodConditionsPattern) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.TTLSecondsAfterFinished = &v - case 9: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodFailurePolicyOnPodConditionsPattern: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodFailurePolicyOnPodConditionsPattern: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2978,14 +4775,13 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := CompletionMode(dAtA[iNdEx:postIndex]) - m.CompletionMode = &s + m.Type = k8s_io_api_core_v1.PodConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2995,13 +4791,24 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Suspend = &b + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3023,7 +4830,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobStatus) Unmarshal(dAtA []byte) error { +func (m *PodFailurePolicyRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3046,17 +4853,17 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + return fmt.Errorf("proto: PodFailurePolicyRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodFailurePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -3066,29 +4873,27 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Action = PodFailurePolicyAction(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnExitCodes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3115,16 +4920,16 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartTime == nil { - m.StartTime = &v1.Time{} + if m.OnExitCodes == nil { + m.OnExitCodes = &PodFailurePolicyOnExitCodesRequirement{} } - if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.OnExitCodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnPodConditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3151,105 +4956,64 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CompletionTime == nil { - m.CompletionTime = &v1.Time{} - } - if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.OnPodConditions = append(m.OnPodConditions, PodFailurePolicyOnPodConditionsPattern{}) + if err := m.OnPodConditions[len(m.OnPodConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - m.Active = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Active |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - m.Succeeded = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Succeeded |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) - } - m.Failed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failed |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletedIndexes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SuccessPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.CompletedIndexes = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SuccessPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SuccessPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UncountedTerminatedPods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3276,33 +5040,11 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.UncountedTerminatedPods == nil { - m.UncountedTerminatedPods = &UncountedTerminatedPods{} - } - if err := m.UncountedTerminatedPods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Rules = append(m.Rules, SuccessPolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Ready = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3324,7 +5066,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { +func (m *SuccessPolicyRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3347,17 +5089,17 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + return fmt.Errorf("proto: SuccessPolicyRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SuccessPolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SucceededIndexes", 
wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3367,30 +5109,30 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.SucceededIndexes = &s iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SucceededCount", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3400,25 +5142,12 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.SucceededCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 161886029..f5a9385f5 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -27,14 +27,14 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/batch/v1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +52,7 @@ message CronJobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -63,6 +63,18 @@ message CronJobSpec { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. optional string schedule = 1; + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. 
+ // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. + // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + // +optional + optional string timeZone = 8; + // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional @@ -70,6 +82,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -100,15 +113,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // Job represents the configuration of a single job. @@ -116,7 +129,7 @@ message Job { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -139,11 +152,11 @@ message JobCondition { // Last time the condition was checked. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -159,7 +172,7 @@ message JobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Jobs. 
repeated Job items = 2; @@ -176,7 +189,7 @@ message JobSpec { optional int32 parallelism = 1; // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -192,16 +205,62 @@ message JobSpec { // +optional optional int64 activeDeadlineSeconds = 3; + // Specifies the policy of handling failed pods. In particular, it allows to + // specify the set of actions and conditions which need to be + // satisfied to take the associated action. + // If empty, the default behaviour applies - the counter of failed pods, + // represented by the jobs's .status.failed field, is incremented and it is + // checked against the backoffLimit. This field cannot be used in combination + // with restartPolicy=OnFailure. + // + // +optional + optional PodFailurePolicy podFailurePolicy = 11; + + // successPolicy specifies the policy when the Job can be declared as succeeded. + // If empty, the default behavior applies - the Job is declared as succeeded + // only when the number of succeeded pods equals to the completions. + // When the field is specified, it must be immutable and works only for the Indexed Jobs. + // Once the Job meets the SuccessPolicy, the lingering pods are terminated. + // + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). + // +optional + optional SuccessPolicy successPolicy = 16; + // Specifies the number of retries before marking this job failed. // Defaults to 6 // +optional optional int32 backoffLimit = 7; + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + optional int32 backoffLimitPerIndex = 12; + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. + // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when is completions greater than 10^5. + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + optional int32 maxFailedIndexes = 13; + // A label query over pods that should match the pod count. // Normally, the system sets this field for you. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -217,8 +276,9 @@ message JobSpec { optional bool manualSelector = 5; // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - optional k8s.io.api.core.v1.PodTemplateSpec template = 6; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 6; // ttlSecondsAfterFinished limits the lifetime of a Job that has finished // execution (either Complete or Failed). If this field is set, @@ -230,7 +290,7 @@ message JobSpec { // +optional optional int32 ttlSecondsAfterFinished = 8; - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -248,13 +308,14 @@ message JobSpec { // `$(job-name)-$(index)-$(random-string)`, // the Pod hostname takes the form `$(job-name)-$(index)`. // - // This field is beta-level. More completion modes can be added in the future. - // If the Job controller observes a mode that it doesn't recognize, the - // controller skips updates for the Job. + // More completion modes can be added in the future. + // If the Job controller observes a mode that it doesn't recognize, which + // is possible during upgrades due to version skew, the controller + // skips updates for the Job. // +optional optional string completionMode = 9; - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -262,11 +323,37 @@ message JobSpec { // Suspending a Job will reset the StartTime field of the Job, effectively // resetting the ActiveDeadlineSeconds timer too. Defaults to false. // - // This field is beta-level, gated by SuspendJob feature flag (enabled by - // default). - // // +optional optional bool suspend = 10; + + // podReplacementPolicy specifies when to create replacement Pods. + // Possible values are: + // - TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + // - Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + // + // When using podFailurePolicy, Failed is the the only allowed value. + // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. + // This is on by default. + // +optional + optional string podReplacementPolicy = 14; + + // ManagedBy field indicates the controller that manages a Job. 
The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + optional string managedBy = 15; } // JobStatus represents the current state of a Job. @@ -277,6 +364,12 @@ message JobStatus { // status true; when the Job is resumed, the status of this condition will // become false. When a Job is completed, one of the conditions will have // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -288,29 +381,48 @@ message JobStatus { // Job is created in the suspended state, this field is not set until the // first time it is resumed. This field is reset every time a Job is resumed // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. + // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - // The completion time is only set when the job finishes successfully. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; - // The number of pending and running pods. + // The number of pending and running pods which are not terminating (without + // a deletionTimestamp). + // The value is zero for finished jobs. // +optional optional int32 active = 4; // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. // +optional optional int32 succeeded = 5; // The number of pods which reached phase Failed. + // The value increases monotonically. // +optional optional int32 failed = 6; - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // The number of pods which are terminating (in phase Pending or Running + // and have a deletionTimestamp). + // + // This field is beta-level. 
The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (enabled by default). + // +optional + optional int32 terminating = 11; + + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -320,30 +432,41 @@ message JobStatus { // +optional optional string completedIndexes = 7; - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. + // The indexes are represented in the text format analogous as for the + // `completedIndexes` field, ie. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + optional string failedIndexes = 10; + + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. - // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // - // This field is beta-level. The job controller only makes use of this field - // when the feature gate JobTrackingWithFinalizers is enabled (enabled - // by default). // Old jobs might not be tracked using this field, in which case the field // remains null. + // The structure is empty for finished jobs. // +optional optional UncountedTerminatedPods uncountedTerminatedPods = 8; - // The number of pods which have a Ready condition. - // - // This field is alpha-level. The job controller populates the field when - // the feature gate JobReadyPods is enabled (disabled by default). - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). optional int32 ready = 9; } @@ -352,7 +475,7 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -360,15 +483,153 @@ message JobTemplateSpec { optional JobSpec spec = 2; } +// PodFailurePolicy describes how failed pods influence the backoffLimit. +message PodFailurePolicy { + // A list of pod failure policy rules. The rules are evaluated in order. + // Once a rule matches a Pod failure, the remaining of the rules are ignored. + // When no rule matches the Pod failure, the default handling applies - the + // counter of pod failures is incremented and it is checked against + // the backoffLimit. At most 20 elements are allowed. + // +listType=atomic + repeated PodFailurePolicyRule rules = 1; +} + +// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling +// a failed pod based on its container exit codes. In particular, it lookups the +// .state.terminated.exitCode for each app container and init container status, +// represented by the .status.containerStatuses and .status.initContainerStatuses +// fields in the Pod status, respectively. Containers completed with success +// (exit code 0) are excluded from the requirement check. +message PodFailurePolicyOnExitCodesRequirement { + // Restricts the check for exit codes to the container with the + // specified name. When null, the rule applies to all containers. + // When specified, it should match one the container or initContainer + // names in the pod template. + // +optional + optional string containerName = 1; + + // Represents the relationship between the container exit code(s) and the + // specified values. Containers completed with success (exit code 0) are + // excluded from the requirement check. Possible values are: + // + // - In: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is in the set of specified values. + // - NotIn: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is not in the set of specified values. + // Additional values are considered to be added in the future. Clients should + // react to an unknown operator by assuming the requirement is not satisfied. + optional string operator = 2; + + // Specifies the set of values. Each returned container exit code (might be + // multiple in case of multiple containers) is checked against this set of + // values with respect to the operator. The list of values must be ordered + // and must not contain duplicates. Value '0' cannot be used for the In operator. + // At least one element is required. At most 255 elements are allowed. + // +listType=set + repeated int32 values = 3; +} + +// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching +// an actual pod condition type. +message PodFailurePolicyOnPodConditionsPattern { + // Specifies the required Pod condition type. To match a pod condition + // it is required that specified type equals the pod condition type. + optional string type = 1; + + // Specifies the required Pod condition status. To match a pod condition + // it is required that the specified status equals the pod condition status. + // Defaults to True. + optional string status = 2; +} + +// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. 
+message PodFailurePolicyRule { + // Specifies the action taken on a pod failure when the requirements are satisfied. + // Possible values are: + // + // - FailJob: indicates that the pod's job is marked as Failed and all + // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + // - Ignore: indicates that the counter towards the .backoffLimit is not + // incremented and a replacement pod is created. + // - Count: indicates that the pod is handled in the default way - the + // counter towards the .backoffLimit is incremented. + // Additional values are considered to be added in the future. Clients should + // react to an unknown action by skipping the rule. + optional string action = 1; + + // Represents the requirement on the container exit codes. + // +optional + optional PodFailurePolicyOnExitCodesRequirement onExitCodes = 2; + + // Represents the requirement on the pod conditions. The requirement is represented + // as a list of pod condition patterns. The requirement is satisfied if at + // least one pattern matches an actual pod condition. At most 20 elements are allowed. + // +listType=atomic + // +optional + repeated PodFailurePolicyOnPodConditionsPattern onPodConditions = 3; +} + +// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes. +message SuccessPolicy { + // rules represents the list of alternative rules for the declaring the Jobs + // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, + // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. + // The terminal state for such a Job has the "Complete" condition. + // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, + // other rules are ignored. At most 20 elements are allowed. + // +listType=atomic + repeated SuccessPolicyRule rules = 1; +} + +// SuccessPolicyRule describes rule for declaring a Job as succeeded. +// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified. +message SuccessPolicyRule { + // succeededIndexes specifies the set of indexes + // which need to be contained in the actual set of the succeeded indexes for the Job. + // The list of indexes must be within 0 to ".spec.completions-1" and + // must not contain duplicates. At least one element is required. + // The indexes are represented as intervals separated by commas. + // The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. + // The number are listed in represented by the first and last element of the series, + // separated by a hyphen. + // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // When this field is null, this field doesn't default to any value + // and is never evaluated at any time. + // + // +optional + optional string succeededIndexes = 1; + + // succeededCount specifies the minimal required size of the actual set of the succeeded indexes + // for the Job. When succeededCount is used along with succeededIndexes, the check is + // constrained only to the set of indexes specified by succeededIndexes. 
+ // For example, given that succeededIndexes is "1-4", succeededCount is "3", + // and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded + // because only "1" and "3" indexes are considered in that rules. + // When this field is null, this doesn't default to any value and + // is never evaluated at any time. + // When specified it needs to be a positive integer. + // + // +optional + optional int32 succeededCount = 2; +} + // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. message UncountedTerminatedPods { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional repeated string succeeded = 1; - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional repeated string failed = 2; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 13cebde3f..b42ec231e 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -17,25 +17,54 @@ limitations under the License. package v1 import ( - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) const ( - JobCompletionIndexAnnotation = "batch.kubernetes.io/job-completion-index" + // All Kubernetes labels need to be prefixed with Kubernetes to distinguish them from end-user labels + // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions + labelPrefix = "batch.kubernetes.io/" + // CronJobScheduledTimestampAnnotation is the scheduled timestamp annotation for the Job. + // It records the original/expected scheduled timestamp for the running job, represented in RFC3339. + // The CronJob controller adds this annotation if the CronJobsScheduledAnnotation feature gate (beta in 1.28) is enabled. + CronJobScheduledTimestampAnnotation = labelPrefix + "cronjob-scheduled-timestamp" + + JobCompletionIndexAnnotation = labelPrefix + "job-completion-index" // JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from // being deleted before being accounted in the Job status. - // The apiserver and job controller use this string as a Job annotation, to - // mark Jobs that are being tracked using pod finalizers. Two releases after - // the JobTrackingWithFinalizers graduates to GA, JobTrackingFinalizer will - // no longer be used as a Job annotation. - JobTrackingFinalizer = "batch.kubernetes.io/job-tracking" + // + // Additionally, the apiserver and job controller use this string as a Job + // annotation, to mark Jobs that are being tracked using pod finalizers. + // However, this behavior is deprecated in kubernetes 1.26. This means that, in + // 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the + // apiserver and job controller will ignore this annotation and they will + // always track jobs using finalizers. + JobTrackingFinalizer = labelPrefix + "job-tracking" + // The Job labels will use batch.kubernetes.io as a prefix for all labels + // Historically the job controller uses unprefixed labels for job-name and controller-uid and + // Kubernetes continutes to recognize those unprefixed labels for consistency. + JobNameLabel = labelPrefix + "job-name" + // ControllerUid is used to programatically get pods corresponding to a Job. 
+ // There is a corresponding label without the batch.kubernetes.io that we support for legacy reasons. + ControllerUidLabel = labelPrefix + "controller-uid" + // Annotation indicating the number of failures for the index corresponding + // to the pod, which are counted towards the backoff limit. + JobIndexFailureCountAnnotation = labelPrefix + "job-index-failure-count" + // Annotation indicating the number of failures for the index corresponding + // to the pod, which don't count towards the backoff limit, according to the + // pod failure policy. When the annotation is absent zero is implied. + JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count" + // JobControllerName reserved value for the managedBy field for the built-in + // Job controller. + JobControllerName = "kubernetes.io/job-controller" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Job represents the configuration of a single job. type Job struct { @@ -57,6 +86,7 @@ type Job struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // JobList is a collection of jobs. type JobList struct { @@ -87,6 +117,191 @@ const ( IndexedCompletion CompletionMode = "Indexed" ) +// PodFailurePolicyAction specifies how a Pod failure is handled. +// +enum +type PodFailurePolicyAction string + +const ( + // This is an action which might be taken on a pod failure - mark the + // pod's job as Failed and terminate all running pods. + PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob" + + // This is an action which might be taken on a pod failure - mark the + // Job's index as failed to avoid restarts within this index. This action + // can only be used when backoffLimitPerIndex is set. + // This value is beta-level. + PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" + + // This is an action which might be taken on a pod failure - the counter towards + // .backoffLimit, represented by the job's .status.failed field, is not + // incremented and a replacement pod is created. + PodFailurePolicyActionIgnore PodFailurePolicyAction = "Ignore" + + // This is an action which might be taken on a pod failure - the pod failure + // is handled in the default way - the counter towards .backoffLimit, + // represented by the job's .status.failed field, is incremented. + PodFailurePolicyActionCount PodFailurePolicyAction = "Count" +) + +// +enum +type PodFailurePolicyOnExitCodesOperator string + +const ( + PodFailurePolicyOnExitCodesOpIn PodFailurePolicyOnExitCodesOperator = "In" + PodFailurePolicyOnExitCodesOpNotIn PodFailurePolicyOnExitCodesOperator = "NotIn" +) + +// PodReplacementPolicy specifies the policy for creating pod replacements. +// +enum +type PodReplacementPolicy string + +const ( + // TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + TerminatingOrFailed PodReplacementPolicy = "TerminatingOrFailed" + // Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + Failed PodReplacementPolicy = "Failed" +) + +// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling +// a failed pod based on its container exit codes. 
In particular, it lookups the +// .state.terminated.exitCode for each app container and init container status, +// represented by the .status.containerStatuses and .status.initContainerStatuses +// fields in the Pod status, respectively. Containers completed with success +// (exit code 0) are excluded from the requirement check. +type PodFailurePolicyOnExitCodesRequirement struct { + // Restricts the check for exit codes to the container with the + // specified name. When null, the rule applies to all containers. + // When specified, it should match one the container or initContainer + // names in the pod template. + // +optional + ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + + // Represents the relationship between the container exit code(s) and the + // specified values. Containers completed with success (exit code 0) are + // excluded from the requirement check. Possible values are: + // + // - In: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is in the set of specified values. + // - NotIn: the requirement is satisfied if at least one container exit code + // (might be multiple if there are multiple containers not restricted + // by the 'containerName' field) is not in the set of specified values. + // Additional values are considered to be added in the future. Clients should + // react to an unknown operator by assuming the requirement is not satisfied. + Operator PodFailurePolicyOnExitCodesOperator `json:"operator" protobuf:"bytes,2,req,name=operator"` + + // Specifies the set of values. Each returned container exit code (might be + // multiple in case of multiple containers) is checked against this set of + // values with respect to the operator. The list of values must be ordered + // and must not contain duplicates. Value '0' cannot be used for the In operator. + // At least one element is required. At most 255 elements are allowed. + // +listType=set + Values []int32 `json:"values" protobuf:"varint,3,rep,name=values"` +} + +// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching +// an actual pod condition type. +type PodFailurePolicyOnPodConditionsPattern struct { + // Specifies the required Pod condition type. To match a pod condition + // it is required that specified type equals the pod condition type. + Type corev1.PodConditionType `json:"type" protobuf:"bytes,1,req,name=type"` + + // Specifies the required Pod condition status. To match a pod condition + // it is required that the specified status equals the pod condition status. + // Defaults to True. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,req,name=status"` +} + +// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. +// One of onExitCodes and onPodConditions, but not both, can be used in each rule. +type PodFailurePolicyRule struct { + // Specifies the action taken on a pod failure when the requirements are satisfied. + // Possible values are: + // + // - FailJob: indicates that the pod's job is marked as Failed and all + // running pods are terminated. + // - FailIndex: indicates that the pod's index is marked as Failed and will + // not be restarted. + // This value is beta-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). 
+ // - Ignore: indicates that the counter towards the .backoffLimit is not + // incremented and a replacement pod is created. + // - Count: indicates that the pod is handled in the default way - the + // counter towards the .backoffLimit is incremented. + // Additional values are considered to be added in the future. Clients should + // react to an unknown action by skipping the rule. + Action PodFailurePolicyAction `json:"action" protobuf:"bytes,1,req,name=action"` + + // Represents the requirement on the container exit codes. + // +optional + OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"` + + // Represents the requirement on the pod conditions. The requirement is represented + // as a list of pod condition patterns. The requirement is satisfied if at + // least one pattern matches an actual pod condition. At most 20 elements are allowed. + // +listType=atomic + // +optional + OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"` +} + +// PodFailurePolicy describes how failed pods influence the backoffLimit. +type PodFailurePolicy struct { + // A list of pod failure policy rules. The rules are evaluated in order. + // Once a rule matches a Pod failure, the remaining of the rules are ignored. + // When no rule matches the Pod failure, the default handling applies - the + // counter of pod failures is incremented and it is checked against + // the backoffLimit. At most 20 elements are allowed. + // +listType=atomic + Rules []PodFailurePolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"` +} + +// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes. +type SuccessPolicy struct { + // rules represents the list of alternative rules for the declaring the Jobs + // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, + // the "SucceededCriteriaMet" condition is added, and the lingering pods are removed. + // The terminal state for such a Job has the "Complete" condition. + // Additionally, these rules are evaluated in order; Once the Job meets one of the rules, + // other rules are ignored. At most 20 elements are allowed. + // +listType=atomic + Rules []SuccessPolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"` +} + +// SuccessPolicyRule describes rule for declaring a Job as succeeded. +// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified. +type SuccessPolicyRule struct { + // succeededIndexes specifies the set of indexes + // which need to be contained in the actual set of the succeeded indexes for the Job. + // The list of indexes must be within 0 to ".spec.completions-1" and + // must not contain duplicates. At least one element is required. + // The indexes are represented as intervals separated by commas. + // The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. + // The number are listed in represented by the first and last element of the series, + // separated by a hyphen. + // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // When this field is null, this field doesn't default to any value + // and is never evaluated at any time. 
+ // + // +optional + SucceededIndexes *string `json:"succeededIndexes,omitempty" protobuf:"bytes,1,opt,name=succeededIndexes"` + + // succeededCount specifies the minimal required size of the actual set of the succeeded indexes + // for the Job. When succeededCount is used along with succeededIndexes, the check is + // constrained only to the set of indexes specified by succeededIndexes. + // For example, given that succeededIndexes is "1-4", succeededCount is "3", + // and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded + // because only "1" and "3" indexes are considered in that rules. + // When this field is null, this doesn't default to any value and + // is never evaluated at any time. + // When specified it needs to be a positive integer. + // + // +optional + SucceededCount *int32 `json:"succeededCount,omitempty" protobuf:"varint,2,opt,name=succeededCount"` +} + // JobSpec describes how the job execution will look like. type JobSpec struct { @@ -99,7 +314,7 @@ type JobSpec struct { Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any + // job should be run with. Setting to null means that the success of any // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. @@ -115,11 +330,57 @@ type JobSpec struct { // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + // Specifies the policy of handling failed pods. In particular, it allows to + // specify the set of actions and conditions which need to be + // satisfied to take the associated action. + // If empty, the default behaviour applies - the counter of failed pods, + // represented by the jobs's .status.failed field, is incremented and it is + // checked against the backoffLimit. This field cannot be used in combination + // with restartPolicy=OnFailure. + // + // +optional + PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` + + // successPolicy specifies the policy when the Job can be declared as succeeded. + // If empty, the default behavior applies - the Job is declared as succeeded + // only when the number of succeeded pods equals to the completions. + // When the field is specified, it must be immutable and works only for the Indexed Jobs. + // Once the Job meets the SuccessPolicy, the lingering pods are terminated. + // + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). + // +optional + SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` + // Specifies the number of retries before marking this job failed. // Defaults to 6 // +optional BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` + // Specifies the limit for the number of retries within an + // index before marking this index as failed. When enabled the number of + // failures per index is kept in the pod's + // batch.kubernetes.io/job-index-failure-count annotation. It can only + // be set when Job's completionMode=Indexed, and the Pod's restart + // policy is Never. The field is immutable. 
+ // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` + + // Specifies the maximal number of failed indexes before marking the Job as + // failed, when backoffLimitPerIndex is set. Once the number of failed + // indexes exceeds this number the entire Job is marked as Failed and its + // execution is terminated. When left as null the job continues execution of + // all of its indexes and is marked with the `Complete` Job condition. + // It can only be specified when backoffLimitPerIndex is set. + // It can be null or up to completions. It is required and must be + // less than or equal to 10^4 when is completions greater than 10^5. + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` + // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed // Optional number of failed pods to retain. // +optional @@ -145,8 +406,9 @@ type JobSpec struct { ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Describes the pod that will be created when executing a job. + // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` // ttlSecondsAfterFinished limits the lifetime of a Job that has finished // execution (either Complete or Failed). If this field is set, @@ -158,7 +420,7 @@ type JobSpec struct { // +optional TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` - // CompletionMode specifies how Pod completions are tracked. It can be + // completionMode specifies how Pod completions are tracked. It can be // `NonIndexed` (default) or `Indexed`. // // `NonIndexed` means that the Job is considered complete when there have @@ -176,13 +438,14 @@ type JobSpec struct { // `$(job-name)-$(index)-$(random-string)`, // the Pod hostname takes the form `$(job-name)-$(index)`. // - // This field is beta-level. More completion modes can be added in the future. - // If the Job controller observes a mode that it doesn't recognize, the - // controller skips updates for the Job. + // More completion modes can be added in the future. + // If the Job controller observes a mode that it doesn't recognize, which + // is possible during upgrades due to version skew, the controller + // skips updates for the Job. // +optional CompletionMode *CompletionMode `json:"completionMode,omitempty" protobuf:"bytes,9,opt,name=completionMode,casttype=CompletionMode"` - // Suspend specifies whether the Job controller should create Pods or not. If + // suspend specifies whether the Job controller should create Pods or not. If // a Job is created with suspend set to true, no Pods are created by the Job // controller. If a Job is suspended after creation (i.e. 
the flag goes from // false to true), the Job controller will delete all active Pods associated @@ -190,11 +453,37 @@ type JobSpec struct { // Suspending a Job will reset the StartTime field of the Job, effectively // resetting the ActiveDeadlineSeconds timer too. Defaults to false. // - // This field is beta-level, gated by SuspendJob feature flag (enabled by - // default). - // // +optional Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"` + + // podReplacementPolicy specifies when to create replacement Pods. + // Possible values are: + // - TerminatingOrFailed means that we recreate pods + // when they are terminating (has a metadata.deletionTimestamp) or failed. + // - Failed means to wait until a previously created Pod is fully terminated (has phase + // Failed or Succeeded) before creating a replacement Pod. + // + // When using podFailurePolicy, Failed is the the only allowed value. + // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. + // This is on by default. + // +optional + PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` + + // ManagedBy field indicates the controller that manages a Job. The k8s Job + // controller reconciles jobs which don't have this field at all or the field + // value is the reserved string `kubernetes.io/job-controller`, but skips + // reconciling Jobs with a custom value for this field. + // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - + // all characters before the first "/" must be a valid subdomain as defined + // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. + // + // This field is alpha-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (disabled by default). + // +optional + ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"` } // JobStatus represents the current state of a Job. @@ -205,6 +494,12 @@ type JobStatus struct { // status true; when the Job is resumed, the status of this condition will // become false. When a Job is completed, one of the conditions will have // type "Complete" and status true. + // + // A job is considered finished when it is in a terminal condition, either + // "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions. + // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions. + // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled. + // // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ // +optional // +patchMergeKey=type @@ -216,29 +511,48 @@ type JobStatus struct { // Job is created in the suspended state, this field is not set until the // first time it is resumed. This field is reset every time a Job is resumed // from suspension. It is represented in RFC3339 form and is in UTC. + // + // Once set, the field can only be removed when the job is suspended. + // The field cannot be modified while the job is unsuspended or finished. + // // +optional StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` // Represents time when the job was completed. 
It is not guaranteed to // be set in happens-before order across separate operations. // It is represented in RFC3339 form and is in UTC. - // The completion time is only set when the job finishes successfully. + // The completion time is set when the job finishes successfully, and only then. + // The value cannot be updated or removed. The value indicates the same or + // later point in time as the startTime field. // +optional CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - // The number of pending and running pods. + // The number of pending and running pods which are not terminating (without + // a deletionTimestamp). + // The value is zero for finished jobs. // +optional Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` // The number of pods which reached phase Succeeded. + // The value increases monotonically for a given spec. However, it may + // decrease in reaction to scale down of elastic indexed jobs. // +optional Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` // The number of pods which reached phase Failed. + // The value increases monotonically. // +optional Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` - // CompletedIndexes holds the completed indexes when .spec.completionMode = + // The number of pods which are terminating (in phase Pending or Running + // and have a deletionTimestamp). + // + // This field is beta-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (enabled by default). + // +optional + Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"` + + // completedIndexes holds the completed indexes when .spec.completionMode = // "Indexed" in a text format. The indexes are represented as decimal integers // separated by commas. The numbers are listed in increasing order. Three or // more consecutive numbers are compressed and represented by the first and @@ -248,42 +562,53 @@ type JobStatus struct { // +optional CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"` - // UncountedTerminatedPods holds the UIDs of Pods that have terminated but + // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. + // The indexes are represented in the text format analogous as for the + // `completedIndexes` field, ie. they are kept as decimal integers + // separated by commas. The numbers are listed in increasing order. Three or + // more consecutive numbers are compressed and represented by the first and + // last element of the series, separated by a hyphen. + // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + // represented as "1,3-5,7". + // The set of failed indexes cannot overlap with the set of completed indexes. + // + // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (enabled by default). + // +optional + FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` + + // uncountedTerminatedPods holds the UIDs of Pods that have terminated but // the job controller hasn't yet accounted for in the status counters. // // The job controller creates pods with a finalizer. When a pod terminates // (succeeded or failed), the controller does three steps to account for it // in the job status: - // (1) Add the pod UID to the arrays in this field. 
- // (2) Remove the pod finalizer. - // (3) Remove the pod UID from the arrays while increasing the corresponding + // + // 1. Add the pod UID to the arrays in this field. + // 2. Remove the pod finalizer. + // 3. Remove the pod UID from the arrays while increasing the corresponding // counter. // - // This field is beta-level. The job controller only makes use of this field - // when the feature gate JobTrackingWithFinalizers is enabled (enabled - // by default). // Old jobs might not be tracked using this field, in which case the field // remains null. + // The structure is empty for finished jobs. // +optional UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` - // The number of pods which have a Ready condition. - // - // This field is alpha-level. The job controller populates the field when - // the feature gate JobReadyPods is enabled (disabled by default). - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } // UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't // been accounted in Job status counters. type UncountedTerminatedPods struct { - // Succeeded holds UIDs of succeeded Pods. + // succeeded holds UIDs of succeeded Pods. // +listType=set // +optional Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Failed holds UIDs of failed Pods. + // failed holds UIDs of failed Pods. // +listType=set // +optional Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"` @@ -299,6 +624,38 @@ const ( JobComplete JobConditionType = "Complete" // JobFailed means the job has failed its execution. JobFailed JobConditionType = "Failed" + // FailureTarget means the job is about to fail its execution. + JobFailureTarget JobConditionType = "FailureTarget" + // JobSuccessCriteriaMet means the Job has been succeeded. + JobSuccessCriteriaMet JobConditionType = "SuccessCriteriaMet" +) + +const ( + // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to + // a failed pod matching a pod failure policy rule + // https://kep.k8s.io/3329 + JobReasonPodFailurePolicy string = "PodFailurePolicy" + // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of + // times higher than backOffLimit times. + JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded" + // JobReasponDeadlineExceeded means job duration is past ActiveDeadline + JobReasonDeadlineExceeded string = "DeadlineExceeded" + // JobReasonMaxFailedIndexesExceeded indicates that an indexed of a job failed + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded" + // JobReasonFailedIndexes means Job has failed indexes. + // This const is used in beta-level feature: https://kep.k8s.io/3850. + JobReasonFailedIndexes string = "FailedIndexes" + // JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to + // a Job met successPolicy. + // https://kep.k8s.io/3998 + // This is currently a beta field. 
+ JobReasonSuccessPolicy string = "SuccessPolicy" + // JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to + // a number of succeeded Job pods met completions. + // - https://kep.k8s.io/3998 + // This is currently a beta field. + JobReasonCompletionsReached string = "CompletionsReached" ) // JobCondition describes current state of a job. @@ -306,7 +663,7 @@ type JobCondition struct { // Type of job condition, Complete or Failed. Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // Last time the condition was checked. // +optional LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` @@ -336,6 +693,7 @@ type JobTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJob represents the configuration of a single cron job. type CronJob struct { @@ -357,6 +715,7 @@ type CronJob struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJobList is a collection of cron jobs. type CronJobList struct { @@ -377,6 +736,18 @@ type CronJobSpec struct { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. + // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. + // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + // +optional + TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` + // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional @@ -384,6 +755,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -433,7 +805,7 @@ type CronJobStatus struct { // A list of pointers to currently running jobs. 
// +optional // +listType=atomic - Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` + Active []corev1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` // Information when was the last time the job was successfully scheduled. // +optional diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 269021a9c..d50488788 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,8 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. Value must be non-negative integer. 
Defaults to 3.", @@ -112,15 +113,21 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", + "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", + "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. 
Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", - "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", - "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.", - "suspend": "Suspend specifies whether the Job controller should create Pods or not. 
If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).", + "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is alpha-level. 
The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", } func (JobSpec) SwaggerDoc() map[string]string { @@ -129,15 +136,17 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", - "active": "The number of pending and running pods.", - "succeeded": "The number of pods which reached phase Succeeded.", - "failed": "The number of pods which reached phase Failed.", - "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is beta-level. The job controller only makes use of this field when the feature gate JobTrackingWithFinalizers is enabled (enabled by default). Old jobs might not be tracked using this field, in which case the field remains null.", - "ready": "The number of pods which have a Ready condition.\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobReadyPods is enabled (disabled by default).", + "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true.\n\nA job is considered finished when it is in a terminal condition, either \"Complete\" or \"Failed\". 
A Job cannot have both the \"Complete\" and \"Failed\" conditions. Additionally, it cannot be in the \"Complete\" and \"FailureTarget\" conditions. The \"Complete\", \"Failed\" and \"FailureTarget\" conditions cannot be disabled.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.\n\nOnce set, the field can only be removed when the job is suspended. The field cannot be modified while the job is unsuspended or finished.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is set when the job finishes successfully, and only then. The value cannot be updated or removed. The value indicates the same or later point in time as the startTime field.", + "active": "The number of pending and running pods which are not terminating (without a deletionTimestamp). The value is zero for finished jobs.", + "succeeded": "The number of pods which reached phase Succeeded. The value increases monotonically for a given spec. However, it may decrease in reaction to scale down of elastic indexed jobs.", + "failed": "The number of pods which reached phase Failed. The value increases monotonically.", + "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. 
Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.", + "ready": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).", } func (JobStatus) SwaggerDoc() map[string]string { @@ -154,10 +163,70 @@ func (JobTemplateSpec) SwaggerDoc() map[string]string { return map_JobTemplateSpec } +var map_PodFailurePolicy = map[string]string{ + "": "PodFailurePolicy describes how failed pods influence the backoffLimit.", + "rules": "A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed.", +} + +func (PodFailurePolicy) SwaggerDoc() map[string]string { + return map_PodFailurePolicy +} + +var map_PodFailurePolicyOnExitCodesRequirement = map[string]string{ + "": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it lookups the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", + "containerName": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one the container or initContainer names in the pod template.", + "operator": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "values": "Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", +} + +func (PodFailurePolicyOnExitCodesRequirement) SwaggerDoc() map[string]string { + return map_PodFailurePolicyOnExitCodesRequirement +} + +var map_PodFailurePolicyOnPodConditionsPattern = map[string]string{ + "": "PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type.", + "type": "Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type.", + "status": "Specifies the required Pod condition status. 
To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True.", +} + +func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { + return map_PodFailurePolicyOnPodConditionsPattern +} + +var map_PodFailurePolicyRule = map[string]string{ + "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "onExitCodes": "Represents the requirement on the container exit codes.", + "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", +} + +func (PodFailurePolicyRule) SwaggerDoc() map[string]string { + return map_PodFailurePolicyRule +} + +var map_SuccessPolicy = map[string]string{ + "": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.", + "rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.", +} + +func (SuccessPolicy) SwaggerDoc() map[string]string { + return map_SuccessPolicy +} + +var map_SuccessPolicyRule = map[string]string{ + "": "SuccessPolicyRule describes rule for declaring a Job as succeeded. Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.", + "succeededIndexes": "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time.", + "succeededCount": "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. 
When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.", +} + +func (SuccessPolicyRule) SwaggerDoc() map[string]string { + return map_SuccessPolicyRule +} + var map_UncountedTerminatedPods = map[string]string{ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", - "succeeded": "Succeeded holds UIDs of succeeded Pods.", - "failed": "Failed holds UIDs of failed Pods.", + "succeeded": "succeeded holds UIDs of succeeded Pods.", + "failed": "failed holds UIDs of failed Pods.", } func (UncountedTerminatedPods) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index a9806a502..88c58b3d1 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -92,6 +92,11 @@ func (in *CronJobList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { *out = *in + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } if in.StartingDeadlineSeconds != nil { in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds *out = new(int64) @@ -252,11 +257,31 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(int64) **out = **in } + if in.PodFailurePolicy != nil { + in, out := &in.PodFailurePolicy, &out.PodFailurePolicy + *out = new(PodFailurePolicy) + (*in).DeepCopyInto(*out) + } + if in.SuccessPolicy != nil { + in, out := &in.SuccessPolicy, &out.SuccessPolicy + *out = new(SuccessPolicy) + (*in).DeepCopyInto(*out) + } if in.BackoffLimit != nil { in, out := &in.BackoffLimit, &out.BackoffLimit *out = new(int32) **out = **in } + if in.BackoffLimitPerIndex != nil { + in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex + *out = new(int32) + **out = **in + } + if in.MaxFailedIndexes != nil { + in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes + *out = new(int32) + **out = **in + } if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = new(metav1.LabelSelector) @@ -283,6 +308,16 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) { *out = new(bool) **out = **in } + if in.PodReplacementPolicy != nil { + in, out := &in.PodReplacementPolicy, &out.PodReplacementPolicy + *out = new(PodReplacementPolicy) + **out = **in + } + if in.ManagedBy != nil { + in, out := &in.ManagedBy, &out.ManagedBy + *out = new(string) + **out = **in + } return } @@ -314,6 +349,16 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) { in, out := &in.CompletionTime, &out.CompletionTime *out = (*in).DeepCopy() } + if in.Terminating != nil { + in, out := &in.Terminating, &out.Terminating + *out = new(int32) + **out = **in + } + if in.FailedIndexes != nil { + in, out := &in.FailedIndexes, &out.FailedIndexes + *out = new(string) + **out = **in + } if in.UncountedTerminatedPods != nil { in, out := &in.UncountedTerminatedPods, 
&out.UncountedTerminatedPods *out = new(UncountedTerminatedPods) @@ -355,6 +400,146 @@ func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicy) DeepCopyInto(out *PodFailurePolicy) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PodFailurePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicy. +func (in *PodFailurePolicy) DeepCopy() *PodFailurePolicy { + if in == nil { + return nil + } + out := new(PodFailurePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopyInto(out *PodFailurePolicyOnExitCodesRequirement) { + *out = *in + if in.ContainerName != nil { + in, out := &in.ContainerName, &out.ContainerName + *out = new(string) + **out = **in + } + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnExitCodesRequirement. +func (in *PodFailurePolicyOnExitCodesRequirement) DeepCopy() *PodFailurePolicyOnExitCodesRequirement { + if in == nil { + return nil + } + out := new(PodFailurePolicyOnExitCodesRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopyInto(out *PodFailurePolicyOnPodConditionsPattern) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyOnPodConditionsPattern. +func (in *PodFailurePolicyOnPodConditionsPattern) DeepCopy() *PodFailurePolicyOnPodConditionsPattern { + if in == nil { + return nil + } + out := new(PodFailurePolicyOnPodConditionsPattern) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodFailurePolicyRule) DeepCopyInto(out *PodFailurePolicyRule) { + *out = *in + if in.OnExitCodes != nil { + in, out := &in.OnExitCodes, &out.OnExitCodes + *out = new(PodFailurePolicyOnExitCodesRequirement) + (*in).DeepCopyInto(*out) + } + if in.OnPodConditions != nil { + in, out := &in.OnPodConditions, &out.OnPodConditions + *out = make([]PodFailurePolicyOnPodConditionsPattern, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodFailurePolicyRule. +func (in *PodFailurePolicyRule) DeepCopy() *PodFailurePolicyRule { + if in == nil { + return nil + } + out := new(PodFailurePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SuccessPolicy) DeepCopyInto(out *SuccessPolicy) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]SuccessPolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicy. +func (in *SuccessPolicy) DeepCopy() *SuccessPolicy { + if in == nil { + return nil + } + out := new(SuccessPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuccessPolicyRule) DeepCopyInto(out *SuccessPolicyRule) { + *out = *in + if in.SucceededIndexes != nil { + in, out := &in.SucceededIndexes, &out.SucceededIndexes + *out = new(string) + **out = **in + } + if in.SucceededCount != nil { + in, out := &in.SucceededCount, &out.SucceededCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicyRule. +func (in *SuccessPolicyRule) DeepCopy() *SuccessPolicyRule { + if in == nil { + return nil + } + out := new(SuccessPolicyRule) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b76cb0924 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJob) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJobList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
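For context on the SuccessPolicy/SuccessPolicyRule fields whose deep-copy helpers are added above: a minimal sketch of populating them through the vendored batch/v1 types, assuming an Indexed Job. Field names are taken from the hunks above; the concrete values are illustrative only and this snippet is not part of the vendored diff.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	// Success policy: only indexes 1-4 are considered, and 3 of them must
	// succeed (mirrors the succeededIndexes/succeededCount semantics above).
	succeededIndexes := "1-4"
	succeededCount := int32(3)
	backoffPerIndex := int32(1) // new JobSpec field copied by the generated deepcopy
	indexed := batchv1.IndexedCompletion

	spec := batchv1.JobSpec{
		CompletionMode: &indexed, // success policies apply to Indexed Jobs
		SuccessPolicy: &batchv1.SuccessPolicy{
			Rules: []batchv1.SuccessPolicyRule{{
				SucceededIndexes: &succeededIndexes,
				SucceededCount:   &succeededCount,
			}},
		},
		BackoffLimitPerIndex: &backoffPerIndex,
	}
	fmt.Println(*spec.SuccessPolicy.Rules[0].SucceededIndexes, *spec.SuccessPolicy.Rules[0].SucceededCount)
}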
+func (in *Job) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *JobList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go index 93794f057..895d9c919 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto +// source: k8s.io/api/batch/v1beta1/generated.proto package v1beta1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CronJob) Reset() { *m = CronJob{} } func (*CronJob) ProtoMessage() {} func (*CronJob) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{0} + return fileDescriptor_ed95843ae7b4086b, []int{0} } func (m *CronJob) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_CronJob proto.InternalMessageInfo func (m *CronJobList) Reset() { *m = CronJobList{} } func (*CronJobList) ProtoMessage() {} func (*CronJobList) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{1} + return fileDescriptor_ed95843ae7b4086b, []int{1} } func (m *CronJobList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -104,7 +104,7 @@ var xxx_messageInfo_CronJobList proto.InternalMessageInfo func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } func (*CronJobSpec) ProtoMessage() {} func (*CronJobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{2} + return fileDescriptor_ed95843ae7b4086b, []int{2} } func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +132,7 @@ var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } func (*CronJobStatus) ProtoMessage() {} func (*CronJobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{3} + return fileDescriptor_ed95843ae7b4086b, []int{3} } func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -157,38 +157,10 @@ func (m *CronJobStatus) XXX_DiscardUnknown() { var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{4} -} -func (m *JobTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplate.Merge(m, src) -} -func (m *JobTemplate) XXX_Size() int { - return m.Size() -} -func (m *JobTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplate proto.InternalMessageInfo - func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } func (*JobTemplateSpec) 
ProtoMessage() {} func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{5} + return fileDescriptor_ed95843ae7b4086b, []int{4} } func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,66 +190,64 @@ func init() { proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) + proto.RegisterFile("k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_ed95843ae7b4086b) } -var fileDescriptor_e57b277b05179ae7 = []byte{ - // 794 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0x41, 0x6f, 0x1b, 0x45, - 0x14, 0xc7, 0xbd, 0x4e, 0x1c, 0xbb, 0x63, 0x0a, 0xe9, 0x80, 0x52, 0xcb, 0xa0, 0xdd, 0xe0, 0xaa, - 0x22, 0x20, 0x3a, 0x4b, 0x22, 0x84, 0x38, 0x21, 0x75, 0x8b, 0x0a, 0x84, 0xa0, 0xa2, 0x71, 0x11, - 0x12, 0xaa, 0x50, 0x67, 0x67, 0x5f, 0x9c, 0x69, 0x76, 0x77, 0x56, 0x3b, 0xb3, 0x91, 0x72, 0xe3, - 0xc2, 0x9d, 0x2f, 0xc2, 0x89, 0x3b, 0xe7, 0x1c, 0x7b, 0xec, 0x69, 0x45, 0x96, 0x6f, 0xc1, 0x09, - 0xed, 0x78, 0xbd, 0x76, 0xed, 0x75, 0xd3, 0x5e, 0x72, 0xf3, 0xbc, 0xf9, 0xff, 0x7f, 0xf3, 0xf6, - 0xbd, 0x37, 0x63, 0x74, 0xff, 0xf4, 0x4b, 0x45, 0x84, 0x74, 0x4f, 0x33, 0x1f, 0xd2, 0x18, 0x34, - 0x28, 0xf7, 0x0c, 0xe2, 0x40, 0xa6, 0x6e, 0xb5, 0xc1, 0x12, 0xe1, 0xfa, 0x4c, 0xf3, 0x13, 0xf7, - 0x6c, 0xdf, 0x07, 0xcd, 0xf6, 0xdd, 0x09, 0xc4, 0x90, 0x32, 0x0d, 0x01, 0x49, 0x52, 0xa9, 0x25, - 0x1e, 0x4c, 0x95, 0x84, 0x25, 0x82, 0x18, 0x25, 0xa9, 0x94, 0xc3, 0x7b, 0x13, 0xa1, 0x4f, 0x32, - 0x9f, 0x70, 0x19, 0xb9, 0x13, 0x39, 0x91, 0xae, 0x31, 0xf8, 0xd9, 0xb1, 0x59, 0x99, 0x85, 0xf9, - 0x35, 0x05, 0x0d, 0xef, 0x34, 0x1c, 0xb9, 0x7c, 0xda, 0x70, 0xb4, 0x20, 0xe2, 0x32, 0x85, 0x26, - 0xcd, 0xe7, 0x73, 0x4d, 0xc4, 0xf8, 0x89, 0x88, 0x21, 0x3d, 0x77, 0x93, 0xd3, 0x49, 0x19, 0x50, - 0x6e, 0x04, 0x9a, 0x35, 0xb9, 0xdc, 0x75, 0xae, 0x34, 0x8b, 0xb5, 0x88, 0x60, 0xc5, 0xf0, 0xc5, - 0x55, 0x06, 0xc5, 0x4f, 0x20, 0x62, 0xcb, 0xbe, 0xd1, 0xef, 0x6d, 0xd4, 0x7d, 0x90, 0xca, 0xf8, - 0x50, 0xfa, 0xf8, 0x29, 0xea, 0x95, 0xf9, 0x04, 0x4c, 0xb3, 0x81, 0xb5, 0x6b, 0xed, 0xf5, 0x0f, - 0x3e, 0x23, 0xf3, 0x7a, 0xd6, 0x58, 0x92, 0x9c, 0x4e, 0xca, 0x80, 0x22, 0xa5, 0x9a, 0x9c, 0xed, - 0x93, 0x47, 0xfe, 0x33, 0xe0, 0xfa, 0x07, 0xd0, 0xcc, 0xc3, 0x17, 0xb9, 0xd3, 0x2a, 0x72, 0x07, - 0xcd, 0x63, 0xb4, 0xa6, 0xe2, 0x6f, 0xd0, 0xa6, 0x4a, 0x80, 0x0f, 0xda, 0x86, 0x7e, 0x97, 0xac, - 0xeb, 0x16, 0xa9, 0x52, 0x1a, 0x27, 0xc0, 0xbd, 0xb7, 0x2a, 0xe4, 0x66, 0xb9, 0xa2, 0x06, 0x80, - 0x1f, 0xa1, 0x2d, 0xa5, 0x99, 0xce, 0xd4, 0x60, 0xc3, 0xa0, 0x3e, 0xba, 0x1a, 0x65, 0xe4, 0xde, - 0xdb, 0x15, 0x6c, 0x6b, 0xba, 0xa6, 0x15, 0x66, 0xf4, 0x97, 0x85, 0xfa, 0x95, 0xf2, 0x48, 0x28, - 0x8d, 0x9f, 0xac, 0xd4, 0x82, 0xbc, 0x5e, 0x2d, 0x4a, 0xb7, 0xa9, 0xc4, 0x76, 0x75, 0x52, 0x6f, - 0x16, 0x59, 0xa8, 0xc3, 0x43, 0xd4, 0x11, 0x1a, 0x22, 0x35, 0x68, 0xef, 0x6e, 0xec, 0xf5, 0x0f, - 0x3e, 0xbc, 0x32, 0x7b, 0xef, 0x66, 0x45, 0xeb, 0x7c, 0x57, 0xfa, 0xe8, 0xd4, 0x3e, 0xfa, 0x73, - 0xb3, 0xce, 0xba, 0x2c, 0x0e, 0xfe, 0x14, 0xf5, 0xca, 0x3e, 
0x07, 0x59, 0x08, 0x26, 0xeb, 0x1b, - 0xf3, 0x2c, 0xc6, 0x55, 0x9c, 0xd6, 0x0a, 0xfc, 0x13, 0xba, 0xad, 0x34, 0x4b, 0xb5, 0x88, 0x27, - 0x5f, 0x03, 0x0b, 0x42, 0x11, 0xc3, 0x18, 0xb8, 0x8c, 0x03, 0x65, 0x1a, 0xb4, 0xe1, 0xbd, 0x5f, - 0xe4, 0xce, 0xed, 0x71, 0xb3, 0x84, 0xae, 0xf3, 0xe2, 0x27, 0xe8, 0x16, 0x97, 0x31, 0xcf, 0xd2, - 0x14, 0x62, 0x7e, 0xfe, 0xa3, 0x0c, 0x05, 0x3f, 0x37, 0x6d, 0xba, 0xe1, 0x91, 0x2a, 0x9b, 0x5b, - 0x0f, 0x96, 0x05, 0xff, 0x35, 0x05, 0xe9, 0x2a, 0x08, 0xdf, 0x45, 0x5d, 0x95, 0xa9, 0x04, 0xe2, - 0x60, 0xb0, 0xb9, 0x6b, 0xed, 0xf5, 0xbc, 0x7e, 0x91, 0x3b, 0xdd, 0xf1, 0x34, 0x44, 0x67, 0x7b, - 0xf8, 0x29, 0xea, 0x3f, 0x93, 0xfe, 0x63, 0x88, 0x92, 0x90, 0x69, 0x18, 0x74, 0x4c, 0x0b, 0x3f, - 0x5e, 0x5f, 0xe7, 0xc3, 0xb9, 0xd8, 0x0c, 0xdd, 0xbb, 0x55, 0xa6, 0xfd, 0x85, 0x0d, 0xba, 0x88, - 0xc4, 0xbf, 0xa2, 0xa1, 0xca, 0x38, 0x07, 0xa5, 0x8e, 0xb3, 0xf0, 0x50, 0xfa, 0xea, 0x5b, 0xa1, - 0xb4, 0x4c, 0xcf, 0x8f, 0x44, 0x24, 0xf4, 0x60, 0x6b, 0xd7, 0xda, 0xeb, 0x78, 0x76, 0x91, 0x3b, - 0xc3, 0xf1, 0x5a, 0x15, 0x7d, 0x05, 0x01, 0x53, 0xb4, 0x73, 0xcc, 0x44, 0x08, 0xc1, 0x0a, 0xbb, - 0x6b, 0xd8, 0xc3, 0x22, 0x77, 0x76, 0x1e, 0x36, 0x2a, 0xe8, 0x1a, 0xe7, 0xe8, 0xef, 0x36, 0xba, - 0xf9, 0xd2, 0x7d, 0xc0, 0xdf, 0xa3, 0x2d, 0xc6, 0xb5, 0x38, 0x2b, 0xe7, 0xa5, 0x1c, 0xc5, 0x3b, - 0x8b, 0x25, 0x2a, 0xdf, 0xb4, 0xf9, 0xfd, 0xa6, 0x70, 0x0c, 0x65, 0x27, 0x60, 0x7e, 0x89, 0xee, - 0x1b, 0x2b, 0xad, 0x10, 0x38, 0x44, 0xdb, 0x21, 0x53, 0x7a, 0x36, 0x6a, 0x8f, 0x45, 0x04, 0xa6, - 0x49, 0xfd, 0x83, 0x4f, 0x5e, 0xef, 0xf2, 0x94, 0x0e, 0xef, 0xbd, 0x22, 0x77, 0xb6, 0x8f, 0x96, - 0x38, 0x74, 0x85, 0x8c, 0x53, 0x84, 0x4d, 0xac, 0x2e, 0xa1, 0x39, 0xaf, 0xf3, 0xc6, 0xe7, 0xed, - 0x14, 0xb9, 0x83, 0x8f, 0x56, 0x48, 0xb4, 0x81, 0x3e, 0xba, 0xb0, 0xd0, 0xe2, 0x44, 0x5c, 0xc3, - 0x93, 0xf9, 0x33, 0xea, 0xe9, 0xd9, 0x14, 0xb7, 0xdf, 0x74, 0x8a, 0xeb, 0xdb, 0x5f, 0x8f, 0x70, - 0x0d, 0x2b, 0x5f, 0xbc, 0x77, 0x96, 0xf4, 0xd7, 0xf0, 0x39, 0x5f, 0xbd, 0xf4, 0x0f, 0xf0, 0x41, - 0xd3, 0xa7, 0x90, 0x57, 0x3c, 0xfc, 0xde, 0xbd, 0x8b, 0x4b, 0xbb, 0xf5, 0xfc, 0xd2, 0x6e, 0xbd, - 0xb8, 0xb4, 0x5b, 0xbf, 0x15, 0xb6, 0x75, 0x51, 0xd8, 0xd6, 0xf3, 0xc2, 0xb6, 0x5e, 0x14, 0xb6, - 0xf5, 0x4f, 0x61, 0x5b, 0x7f, 0xfc, 0x6b, 0xb7, 0x7e, 0xe9, 0x56, 0x05, 0xf9, 0x3f, 0x00, 0x00, - 0xff, 0xff, 0xe9, 0xe0, 0x40, 0x92, 0x53, 0x08, 0x00, 0x00, +var fileDescriptor_ed95843ae7b4086b = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0xc7, 0xe3, 0x6c, 0x7e, 0x75, 0xd2, 0xc2, 0x76, 0x40, 0x5b, 0x2b, 0x20, 0x3b, 0xa4, 0xaa, + 0x08, 0x08, 0xc6, 0xec, 0x0a, 0x21, 0x4e, 0x95, 0x70, 0x51, 0x81, 0x25, 0xa8, 0x68, 0x52, 0x2e, + 0x55, 0x85, 0x3a, 0x9e, 0x4c, 0x92, 0xe9, 0xda, 0x1e, 0xcb, 0x33, 0x5e, 0x29, 0x37, 0x2e, 0xdc, + 0xf9, 0x5f, 0xb8, 0x73, 0xde, 0x63, 0x6f, 0xf4, 0x64, 0xb1, 0xe6, 0xbf, 0xe0, 0x84, 0x66, 0xe2, + 0x4d, 0xd2, 0xc4, 0xe9, 0x96, 0x0b, 0xb7, 0xcc, 0xf3, 0xf7, 0xfb, 0x99, 0xa7, 0xf7, 0xde, 0xbc, + 0x80, 0xe1, 0xd9, 0x97, 0x12, 0x71, 0xe1, 0x91, 0x84, 0x7b, 0x01, 0x51, 0x74, 0xee, 0x9d, 0x1f, + 0x07, 0x4c, 0x91, 0x63, 0x6f, 0xc6, 0x62, 0x96, 0x12, 0xc5, 0x26, 0x28, 0x49, 0x85, 0x12, 0xd0, + 0x5e, 0x2a, 0x11, 0x49, 0x38, 0x32, 0x4a, 0x54, 0x2a, 0x7b, 0x9f, 0xce, 0xb8, 0x9a, 0x67, 0x01, + 0xa2, 0x22, 0xf2, 0x66, 0x62, 0x26, 0x3c, 0x63, 0x08, 0xb2, 0xa9, 0x39, 0x99, 0x83, 0xf9, 0xb5, + 0x04, 0xf5, 0xee, 0x56, 0x5c, 0xb9, 0x7d, 0x5b, 0x6f, 0xb0, 0x21, 0xa2, 0x22, 0x65, 0x55, 0x9a, + 0xcf, 0xd7, 0x9a, 0x88, 
0xd0, 0x39, 0x8f, 0x59, 0xba, 0xf0, 0x92, 0xb3, 0x99, 0x0e, 0x48, 0x2f, + 0x62, 0x8a, 0x54, 0xb9, 0xbc, 0x7d, 0xae, 0x34, 0x8b, 0x15, 0x8f, 0xd8, 0x8e, 0xe1, 0x8b, 0xeb, + 0x0c, 0x92, 0xce, 0x59, 0x44, 0xb6, 0x7d, 0x83, 0x5f, 0xeb, 0xa0, 0xfd, 0x20, 0x15, 0xf1, 0xa9, + 0x08, 0xe0, 0x33, 0xd0, 0xd1, 0xf9, 0x4c, 0x88, 0x22, 0xb6, 0xd5, 0xb7, 0x86, 0xdd, 0x93, 0xcf, + 0xd0, 0xba, 0x9e, 0x2b, 0x2c, 0x4a, 0xce, 0x66, 0x3a, 0x20, 0x91, 0x56, 0xa3, 0xf3, 0x63, 0xf4, + 0x28, 0x78, 0xce, 0xa8, 0xfa, 0x81, 0x29, 0xe2, 0xc3, 0x8b, 0xdc, 0xad, 0x15, 0xb9, 0x0b, 0xd6, + 0x31, 0xbc, 0xa2, 0xc2, 0x6f, 0x40, 0x43, 0x26, 0x8c, 0xda, 0x75, 0x43, 0xbf, 0x87, 0xf6, 0x75, + 0x0b, 0x95, 0x29, 0x8d, 0x13, 0x46, 0xfd, 0x9b, 0x25, 0xb2, 0xa1, 0x4f, 0xd8, 0x00, 0xe0, 0x23, + 0xd0, 0x92, 0x8a, 0xa8, 0x4c, 0xda, 0x07, 0x06, 0xf5, 0xe1, 0xf5, 0x28, 0x23, 0xf7, 0xdf, 0x2a, + 0x61, 0xad, 0xe5, 0x19, 0x97, 0x98, 0xc1, 0xef, 0x16, 0xe8, 0x96, 0xca, 0x11, 0x97, 0x0a, 0x3e, + 0xdd, 0xa9, 0x05, 0x7a, 0xb3, 0x5a, 0x68, 0xb7, 0xa9, 0xc4, 0x61, 0x79, 0x53, 0xe7, 0x2a, 0xb2, + 0x51, 0x87, 0x87, 0xa0, 0xc9, 0x15, 0x8b, 0xa4, 0x5d, 0xef, 0x1f, 0x0c, 0xbb, 0x27, 0x1f, 0x5c, + 0x9b, 0xbd, 0x7f, 0xab, 0xa4, 0x35, 0xbf, 0xd3, 0x3e, 0xbc, 0xb4, 0x0f, 0xfe, 0x6c, 0xac, 0xb2, + 0xd6, 0xc5, 0x81, 0x9f, 0x80, 0x8e, 0xee, 0xf3, 0x24, 0x0b, 0x99, 0xc9, 0xfa, 0xc6, 0x3a, 0x8b, + 0x71, 0x19, 0xc7, 0x2b, 0x05, 0x1c, 0x82, 0x8e, 0x1e, 0x8d, 0x27, 0x22, 0x66, 0x76, 0xc7, 0xa8, + 0x6f, 0x6a, 0xe5, 0xe3, 0x32, 0x86, 0x57, 0x5f, 0xe1, 0x4f, 0xe0, 0x8e, 0x54, 0x24, 0x55, 0x3c, + 0x9e, 0x7d, 0xcd, 0xc8, 0x24, 0xe4, 0x31, 0x1b, 0x33, 0x2a, 0xe2, 0x89, 0x34, 0xad, 0x3c, 0xf0, + 0xdf, 0x2b, 0x72, 0xf7, 0xce, 0xb8, 0x5a, 0x82, 0xf7, 0x79, 0xe1, 0x53, 0x70, 0x9b, 0x8a, 0x98, + 0x66, 0x69, 0xca, 0x62, 0xba, 0xf8, 0x51, 0x84, 0x9c, 0x2e, 0x4c, 0x43, 0x6f, 0xf8, 0xa8, 0xcc, + 0xfb, 0xf6, 0x83, 0x6d, 0xc1, 0x3f, 0x55, 0x41, 0xbc, 0x0b, 0x82, 0xf7, 0x40, 0x5b, 0x66, 0x32, + 0x61, 0xf1, 0xc4, 0x6e, 0xf4, 0xad, 0x61, 0xc7, 0xef, 0x16, 0xb9, 0xdb, 0x1e, 0x2f, 0x43, 0xf8, + 0xea, 0x1b, 0x7c, 0x06, 0xba, 0xcf, 0x45, 0xf0, 0x98, 0x45, 0x49, 0x48, 0x14, 0xb3, 0x9b, 0xa6, + 0xd9, 0x1f, 0xed, 0xef, 0xc8, 0xe9, 0x5a, 0x6c, 0xc6, 0xf3, 0x9d, 0x32, 0xd3, 0xee, 0xc6, 0x07, + 0xbc, 0x89, 0x84, 0x3f, 0x83, 0x9e, 0xcc, 0x28, 0x65, 0x52, 0x4e, 0xb3, 0xf0, 0x54, 0x04, 0xf2, + 0x5b, 0x2e, 0x95, 0x48, 0x17, 0x23, 0x1e, 0x71, 0x65, 0xb7, 0xfa, 0xd6, 0xb0, 0xe9, 0x3b, 0x45, + 0xee, 0xf6, 0xc6, 0x7b, 0x55, 0xf8, 0x35, 0x04, 0x88, 0xc1, 0xd1, 0x94, 0xf0, 0x90, 0x4d, 0x76, + 0xd8, 0x6d, 0xc3, 0xee, 0x15, 0xb9, 0x7b, 0xf4, 0xb0, 0x52, 0x81, 0xf7, 0x38, 0x07, 0x7f, 0xd4, + 0xc1, 0xad, 0x57, 0x5e, 0x0e, 0xfc, 0x1e, 0xb4, 0x08, 0x55, 0xfc, 0x5c, 0x4f, 0x96, 0x1e, 0xda, + 0xbb, 0x9b, 0x25, 0xd2, 0xdb, 0x6f, 0xbd, 0x09, 0x30, 0x9b, 0x32, 0xdd, 0x09, 0xb6, 0x7e, 0x6e, + 0x5f, 0x19, 0x2b, 0x2e, 0x11, 0x30, 0x04, 0x87, 0x21, 0x91, 0xea, 0x6a, 0x28, 0xf5, 0xc8, 0x99, + 0x26, 0x75, 0x4f, 0x3e, 0x7e, 0xb3, 0x67, 0xa6, 0x1d, 0xfe, 0xbb, 0x45, 0xee, 0x1e, 0x8e, 0xb6, + 0x38, 0x78, 0x87, 0x0c, 0x53, 0x00, 0x4d, 0x6c, 0x55, 0x42, 0x73, 0x5f, 0xf3, 0x3f, 0xdf, 0x77, + 0x54, 0xe4, 0x2e, 0x1c, 0xed, 0x90, 0x70, 0x05, 0x5d, 0x2f, 0x94, 0xb7, 0xb7, 0x46, 0xe5, 0x7f, + 0x58, 0xb0, 0xf7, 0x5f, 0x59, 0xb0, 0xef, 0x57, 0x4d, 0x31, 0x7a, 0xcd, 0x5e, 0xf5, 0xef, 0x5f, + 0x5c, 0x3a, 0xb5, 0x17, 0x97, 0x4e, 0xed, 0xe5, 0xa5, 0x53, 0xfb, 0xa5, 0x70, 0xac, 0x8b, 0xc2, + 0xb1, 0x5e, 0x14, 0x8e, 0xf5, 0xb2, 0x70, 0xac, 0xbf, 0x0a, 0xc7, 0xfa, 0xed, 0x6f, 0xa7, 0xf6, + 0xc4, 0xde, 0xf7, 0x7f, 0xfc, 0x6f, 0x00, 0x00, 
0x00, 0xff, 0xff, 0x9e, 0xaa, 0x2c, 0x86, 0xaa, + 0x07, 0x00, 0x00, } func (m *CronJob) Marshal() (dAtA []byte, err error) { @@ -400,6 +370,13 @@ func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TimeZone != nil { + i -= len(*m.TimeZone) + copy(dAtA[i:], *m.TimeZone) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TimeZone))) + i-- + dAtA[i] = 0x42 + } if m.FailedJobsHistoryLimit != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) i-- @@ -509,49 +486,6 @@ func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *JobTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -662,6 +596,10 @@ func (m *CronJobSpec) Size() (n int) { if m.FailedJobsHistoryLimit != nil { n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) } + if m.TimeZone != nil { + l = len(*m.TimeZone) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -688,19 +626,6 @@ func (m *CronJobStatus) Size() (n int) { return n } -func (m *JobTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *JobTemplateSpec) Size() (n int) { if m == nil { return 0 @@ -760,6 +685,7 @@ func (this *CronJobSpec) String() string { `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `TimeZone:` + valueToStringGenerated(this.TimeZone) + `,`, `}`, }, "") return s @@ -781,17 +707,6 @@ func (this *CronJobStatus) String() string { }, "") return s } -func (this *JobTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *JobTemplateSpec) String() string { if this == nil { return "nil" @@ -1284,6 +1199,39 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } } m.FailedJobsHistoryLimit = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeZone", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TimeZone = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1461,122 +1409,6 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *JobTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index b4f998c1a..6dd322128 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -28,14 +28,14 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import 
"k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/batch/v1beta1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -53,7 +53,7 @@ message CronJobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -64,6 +64,18 @@ message CronJobSpec { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. optional string schedule = 1; + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. + // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. + // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + // +optional + optional string timeZone = 8; + // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional @@ -71,6 +83,7 @@ message CronJobSpec { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one @@ -103,28 +116,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; -} - -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional JobTemplateSpec template = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // JobTemplateSpec describes the data a Job should have when created from a template @@ -132,11 +132,11 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional - optional k8s.io.api.batch.v1.JobSpec spec = 2; + optional .k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 226de49f4..9382ca23f 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -44,7 +44,6 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &JobTemplate{}, &CronJob{}, &CronJobList{}, ) diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index cd9af7a9e..976752a92 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -22,24 +22,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.8 -// +k8s:prerelease-lifecycle-gen:deprecated=1.22 - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. @@ -104,6 +86,18 @@ type CronJobSpec struct { // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. 
+ // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + // database by the API server during CronJob validation and the controller manager during execution. + // If no system-wide time zone database can be found a bundled version of the database is used instead. + // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + // configuration, the controller will stop creating new new Jobs and will create a system event with the + // reason UnknownTimeZone. + // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + // +optional + TimeZone *string `json:"timeZone,omitempty" protobuf:"bytes,8,opt,name=timeZone"` + // Optional deadline in seconds for starting the job if it misses scheduled // time for any reason. Missed jobs executions will be counted as failed ones. // +optional @@ -111,6 +105,7 @@ type CronJobSpec struct { // Specifies how to treat concurrent executions of a Job. // Valid values are: + // // - "Allow" (default): allows CronJobs to run concurrently; // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; // - "Replace": cancels currently running job and replaces it with a new one diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index 997389812..3b3eafe8c 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ @@ -51,8 +51,9 @@ func (CronJobList) SwaggerDoc() map[string]string { var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. 
Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", @@ -74,16 +75,6 @@ func (CronJobStatus) SwaggerDoc() map[string]string { return map_CronJobStatus } -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 8c256848d..2c8570332 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -90,6 +90,11 @@ func (in *CronJobList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { *out = *in + if in.TimeZone != nil { + in, out := &in.TimeZone, &out.TimeZone + *out = new(string) + **out = **in + } if in.StartingDeadlineSeconds != nil { in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds *out = new(int64) @@ -153,33 +158,6 @@ func (in *CronJobStatus) DeepCopy() *CronJobStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. -func (in *JobTemplate) DeepCopy() *JobTemplate { - if in == nil { - return nil - } - out := new(JobTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
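For context on the timeZone and concurrencyPolicy documentation in the hunks above: a minimal sketch against the vendored batch/v1beta1 types. The field names come from types.go above; the schedule and time zone values are illustrative only, and this snippet is not part of the vendored diff.

package main

import (
	"fmt"

	batchv1beta1 "k8s.io/api/batch/v1beta1"
)

func main() {
	// IANA tz database name; as documented above, a name that becomes invalid
	// stops new Jobs from being created and raises an UnknownTimeZone event.
	tz := "Etc/UTC"

	spec := batchv1beta1.CronJobSpec{
		Schedule:          "30 2 * * *",                  // cron format
		TimeZone:          &tz,                           // new optional field (protobuf field 8)
		ConcurrencyPolicy: batchv1beta1.ForbidConcurrent, // skip the next run if the previous one is still running
	}
	fmt.Println(spec.Schedule, *spec.TimeZone, spec.ConcurrencyPolicy)
}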
-func (in *JobTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go index 2836b3b01..b57e9f1b8 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.prerelease-lifecycle.go @@ -72,21 +72,3 @@ func (in *CronJobList) APILifecycleReplacement() schema.GroupVersionKind { func (in *CronJobList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *JobTemplate) APILifecycleIntroduced() (major, minor int) { - return 1, 8 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *JobTemplate) APILifecycleDeprecated() (major, minor int) { - return 1, 22 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *JobTemplate) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/certificates/v1/doc.go b/vendor/k8s.io/api/certificates/v1/doc.go index fe3ea3af8..78434478e 100644 --- a/vendor/k8s.io/api/certificates/v1/doc.go +++ b/vendor/k8s.io/api/certificates/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=certificates.k8s.io package v1 // import "k8s.io/api/certificates/v1" diff --git a/vendor/k8s.io/api/certificates/v1/generated.pb.go b/vendor/k8s.io/api/certificates/v1/generated.pb.go index 3f06ac97a..cba4a8ea4 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1/generated.proto +// source: k8s.io/api/certificates/v1/generated.proto package v1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{0} + return fileDescriptor_5f7d41da689f96f7, []int{0} } func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{1} + return fileDescriptor_5f7d41da689f96f7, []int{1} } func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{2} + return fileDescriptor_5f7d41da689f96f7, []int{2} } func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{3} + return fileDescriptor_5f7d41da689f96f7, []int{3} } func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{4} + return fileDescriptor_5f7d41da689f96f7, []int{4} } func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_17e045d0de66f3c7, []int{5} + return fileDescriptor_5f7d41da689f96f7, []int{5} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -225,68 +225,67 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1/generated.proto", fileDescriptor_17e045d0de66f3c7) + proto.RegisterFile("k8s.io/api/certificates/v1/generated.proto", fileDescriptor_5f7d41da689f96f7) } -var fileDescriptor_17e045d0de66f3c7 = []byte{ - // 906 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0x9c, 0x54, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xc6, 0x7f, 0x62, 0x8f, 0x43, 0xda, 0x8e, 0xa0, 0x5a, 0x2c, 0xd5, 0x6b, 0x59, 0x50, - 0x19, 0x04, 0xbb, 0x38, 0x2a, 0x10, 0x0a, 0xe2, 0xb0, 0x69, 0x84, 0x2a, 0x52, 0x90, 0x26, 0x09, - 0x87, 0xc2, 0xa1, 0x93, 0xf5, 0xeb, 0x66, 0xea, 0xee, 0x1f, 0x66, 0x66, 0xad, 0xfa, 0xd6, 0x8f, - 0xc0, 0x91, 0x23, 0x5f, 0x80, 0xcf, 0xc0, 0x35, 0xc7, 0x1e, 0x8b, 0x84, 0x2c, 0xe2, 0x7e, 0x8b, - 0x9c, 0xd0, 0xcc, 0x8e, 0xd7, 0x8e, 0x13, 0xb7, 0x25, 0xb7, 0x99, 0xf7, 0x7e, 0xef, 0xf7, 0x7b, - 0xef, 0xcd, 0x7b, 0x83, 0x76, 0x86, 0xdb, 0xc2, 0x65, 0x89, 0x37, 0xcc, 0x8e, 0x80, 0xc7, 0x20, - 0x41, 0x78, 0x23, 0x88, 0x07, 0x09, 0xf7, 0x8c, 0x83, 0xa6, 0xcc, 0x0b, 0x80, 0x4b, 0xf6, 0x98, - 0x05, 0x54, 0xbb, 0xfb, 0x5e, 0x08, 0x31, 0x70, 0x2a, 0x61, 0xe0, 0xa6, 0x3c, 0x91, 0x09, 0x6e, - 0xe5, 0x58, 0x97, 0xa6, 0xcc, 0x5d, 0xc4, 0xba, 0xa3, 0x7e, 0xeb, 0xd3, 0x90, 0xc9, 0xe3, 0xec, - 0xc8, 0x0d, 0x92, 0xc8, 0x0b, 0x93, 0x30, 0xf1, 0x74, 0xc8, 0x51, 0xf6, 0x58, 0xdf, 0xf4, 0x45, - 0x9f, 0x72, 0xaa, 0x56, 0x77, 0x51, 0x36, 0xe1, 0x70, 0x89, 0x5c, 0xeb, 0xce, 0x1c, 0x13, 0xd1, - 0xe0, 0x98, 0xc5, 0xc0, 0xc7, 0x5e, 0x3a, 0x0c, 0x95, 0x41, 0x78, 0x11, 0x48, 0x7a, 0x59, 0x94, - 0xb7, 0x2a, 0x8a, 0x67, 0xb1, 0x64, 0x11, 0x5c, 0x08, 0xf8, 0xe2, 0x4d, 0x01, 0x22, 0x38, 0x86, - 0x88, 0x2e, 0xc7, 0x75, 0xff, 0x5a, 0x43, 0xef, 0xef, 0xcc, 0xbb, 0xb0, 0xcf, 0xc2, 0x98, 0xc5, - 0x21, 0x81, 0x5f, 0x33, 0x10, 0x12, 0x3f, 0x42, 0x75, 0x95, 0xe1, 0x80, 0x4a, 0x6a, 0x5b, 0x1d, - 0xab, 0xd7, 0xdc, 0xfa, 0xcc, 0x9d, 0xb7, 0xaf, 0x10, 0x72, 0xd3, 0x61, 0xa8, 0x0c, 0xc2, 0x55, - 0x68, 0x77, 0xd4, 0x77, 0x7f, 0x3c, 0x7a, 0x02, 0x81, 0x7c, 0x00, 0x92, 0xfa, 0xf8, 0x64, 0xe2, - 0x94, 0xa6, 0x13, 0x07, 0xcd, 0x6d, 0xa4, 0x60, 0xc5, 0x3f, 0xa3, 0x8a, 0x48, 0x21, 0xb0, 0xd7, - 0x34, 0xfb, 0x57, 0xee, 0xea, 0xc7, 0x71, 0x57, 0xa6, 0xb9, 0x9f, 0x42, 0xe0, 0x6f, 0x18, 0x99, - 0x8a, 0xba, 0x11, 0x4d, 0x8a, 0x03, 0x54, 0x13, 0x92, 0xca, 0x4c, 0xd8, 0x65, 0x4d, 0xff, 0xf5, - 0xd5, 0xe8, 0x35, 0x85, 0xbf, 0x69, 0x04, 0x6a, 0xf9, 0x9d, 0x18, 0xea, 0xee, 0xab, 0x32, 0xea, - 0xae, 0x8c, 0xdd, 0x49, 0xe2, 0x01, 0x93, 0x2c, 0x89, 0xf1, 0x36, 0xaa, 0xc8, 0x71, 0x0a, 0xba, - 0x8d, 0x0d, 0xff, 0x83, 0x59, 0xb6, 0x07, 0xe3, 0x14, 0xce, 0x26, 0xce, 0xbb, 0xcb, 0x78, 0x65, - 0x27, 0x3a, 0x02, 0xef, 0x15, 0x55, 0xd4, 0x74, 0xec, 0x9d, 0xf3, 0x89, 0x9c, 0x4d, 0x9c, 0x4b, - 0xe6, 0xd0, 0x2d, 0x98, 0xce, 0xa7, 0x8b, 0x6f, 0xa3, 0x1a, 0x07, 0x2a, 0x92, 0x58, 0xb7, 0xbc, - 0x31, 0x2f, 0x8b, 0x68, 0x2b, 0x31, 0x5e, 0xfc, 0x11, 0x5a, 0x8f, 0x40, 0x08, 0x1a, 0x82, 0x6e, - 0x5e, 0xc3, 0xbf, 0x66, 0x80, 0xeb, 0x0f, 0x72, 0x33, 0x99, 0xf9, 0xf1, 0x13, 0xb4, 0xf9, 0x94, - 0x0a, 0x79, 0x98, 0x0e, 0xa8, 0x84, 0x03, 0x16, 0x81, 0x5d, 0xd1, 0xed, 0xfe, 0xf8, 0xed, 0x66, - 0x45, 0x45, 0xf8, 0x37, 0x0d, 0xfb, 0xe6, 0xde, 0x39, 0x26, 0xb2, 0xc4, 0x8c, 0x47, 0x08, 0x2b, - 0xcb, 0x01, 0xa7, 0xb1, 0xc8, 0x1b, 0xa5, 0xf4, 0xaa, 0xff, 0x5b, 0xaf, 0x65, 0xf4, 0xf0, 0xde, - 0x05, 0x36, 0x72, 0x89, 0x42, 0xf7, 0x6f, 0x0b, 0xdd, 0x5a, 0xf9, 0xca, 0x7b, 0x4c, 0x48, 0xfc, - 0xcb, 0x85, 0x5d, 0x71, 0xdf, 0x2e, 0x1f, 0x15, 0xad, 0x37, 0xe5, 0xba, 0xc9, 0xa9, 0x3e, 0xb3, - 0x2c, 0xec, 0xc9, 0x43, 0x54, 0x65, 0x12, 0x22, 0x61, 0xaf, 0x75, 0xca, 0xbd, 0xe6, 0xd6, 0xe7, - 0x57, 0x9a, 0x64, 0xff, 0x1d, 0xa3, 0x50, 0xbd, 0xaf, 0xb8, 0x48, 0x4e, 0xd9, 0xfd, 0xb3, 0xf2, - 0x9a, 0xda, 0xd4, 0x3a, 0xe1, 0x0f, 0xd1, 0x3a, 0xcf, 0xaf, 0xba, 0xb4, 0x0d, 0xbf, 0xa9, 0x06, - 0xc1, 0x20, 0xc8, 0xcc, 0x87, 0xb7, 0x10, 0x12, 0x2c, 0x8c, 0x81, 0xff, 0x40, 0x23, 
0xb0, 0xd7, - 0xf5, 0xd8, 0x14, 0xeb, 0xbf, 0x5f, 0x78, 0xc8, 0x02, 0x0a, 0xef, 0xa0, 0x1b, 0xf0, 0x2c, 0x65, - 0x9c, 0xea, 0x59, 0x85, 0x20, 0x89, 0x07, 0xc2, 0xae, 0x77, 0xac, 0x5e, 0xd5, 0x7f, 0x6f, 0x3a, - 0x71, 0x6e, 0xec, 0x2e, 0x3b, 0xc9, 0x45, 0x3c, 0x76, 0x51, 0x2d, 0x53, 0xa3, 0x28, 0xec, 0x6a, - 0xa7, 0xdc, 0x6b, 0xf8, 0x37, 0xd5, 0x40, 0x1f, 0x6a, 0xcb, 0xd9, 0xc4, 0xa9, 0x7f, 0x0f, 0x63, - 0x7d, 0x21, 0x06, 0x85, 0x3f, 0x41, 0xf5, 0x4c, 0x00, 0x8f, 0x55, 0x9a, 0xf9, 0x1a, 0x14, 0xbd, - 0x3f, 0x34, 0x76, 0x52, 0x20, 0xf0, 0x2d, 0x54, 0xce, 0xd8, 0xc0, 0xac, 0x41, 0xd3, 0x00, 0xcb, - 0x87, 0xf7, 0xef, 0x11, 0x65, 0xc7, 0x5d, 0x54, 0x0b, 0x79, 0x92, 0xa5, 0xc2, 0xae, 0x68, 0x71, - 0xa4, 0xc4, 0xbf, 0xd3, 0x16, 0x62, 0x3c, 0x98, 0xa1, 0x2a, 0x3c, 0x93, 0x9c, 0xda, 0x35, 0xfd, - 0x7c, 0xf7, 0xae, 0xfc, 0xcf, 0xb9, 0xbb, 0x8a, 0x66, 0x37, 0x96, 0x7c, 0x3c, 0x7f, 0x4d, 0x6d, - 0x23, 0xb9, 0x42, 0xeb, 0x11, 0x42, 0x73, 0x0c, 0xbe, 0x8e, 0xca, 0x43, 0x18, 0xe7, 0xbf, 0x0e, - 0x51, 0x47, 0xfc, 0x0d, 0xaa, 0x8e, 0xe8, 0xd3, 0x0c, 0xcc, 0x97, 0x7b, 0xfb, 0x75, 0xa9, 0x68, - 0xa2, 0x9f, 0x14, 0x9a, 0xe4, 0x41, 0x77, 0xd7, 0xb6, 0xad, 0xee, 0x89, 0x85, 0x9c, 0x37, 0xfc, - 0x96, 0x98, 0x23, 0x14, 0xcc, 0x7e, 0x20, 0x61, 0x5b, 0xba, 0xea, 0x6f, 0xaf, 0x54, 0x75, 0xf1, - 0x91, 0xcd, 0x47, 0xa9, 0x30, 0x09, 0xb2, 0xa0, 0x82, 0xfb, 0xa8, 0xb9, 0xc0, 0xaa, 0xeb, 0xdb, - 0xf0, 0xaf, 0x4d, 0x27, 0x4e, 0x73, 0x81, 0x9c, 0x2c, 0x62, 0xba, 0x5f, 0x9a, 0x66, 0xe9, 0x1a, - 0xb1, 0x33, 0x5b, 0x32, 0x4b, 0x3f, 0x64, 0x63, 0x79, 0x53, 0xee, 0xd6, 0x7f, 0xff, 0xc3, 0x29, - 0x3d, 0xff, 0xa7, 0x53, 0xf2, 0x7b, 0x27, 0xa7, 0xed, 0xd2, 0x8b, 0xd3, 0x76, 0xe9, 0xe5, 0x69, - 0xbb, 0xf4, 0x7c, 0xda, 0xb6, 0x4e, 0xa6, 0x6d, 0xeb, 0xc5, 0xb4, 0x6d, 0xbd, 0x9c, 0xb6, 0xad, - 0x7f, 0xa7, 0x6d, 0xeb, 0xb7, 0x57, 0xed, 0xd2, 0xc3, 0xb5, 0x51, 0xff, 0xbf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x88, 0x08, 0x6d, 0x53, 0xb5, 0x08, 0x00, 0x00, +var fileDescriptor_5f7d41da689f96f7 = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xc6, 0x7f, 0x62, 0x8f, 0x43, 0xda, 0x8e, 0xa0, 0x5a, 0x2c, 0xd5, 0x6b, 0xad, 0xa0, + 0x0a, 0x15, 0xcc, 0x92, 0xa8, 0x40, 0x28, 0x08, 0xa1, 0x4d, 0x23, 0x54, 0x91, 0x82, 0x34, 0x49, + 0x38, 0x14, 0x0e, 0x9d, 0xac, 0x5f, 0x37, 0xd3, 0x74, 0xff, 0xb0, 0x33, 0x6b, 0xd5, 0xb7, 0x7e, + 0x04, 0x8e, 0x1c, 0xf9, 0x02, 0x7c, 0x06, 0xae, 0x39, 0xf6, 0x58, 0x24, 0x64, 0x11, 0xf7, 0x5b, + 0xe4, 0x84, 0x66, 0x76, 0xbc, 0x76, 0x9c, 0xb8, 0x0d, 0xb9, 0x79, 0x7e, 0xf3, 0x7b, 0xbf, 0xdf, + 0x7b, 0x6f, 0xdf, 0x1b, 0x19, 0xdd, 0x39, 0xda, 0x14, 0x84, 0x27, 0x1e, 0x4b, 0xb9, 0x17, 0x40, + 0x26, 0xf9, 0x13, 0x1e, 0x30, 0x09, 0xc2, 0x1b, 0xac, 0x7b, 0x21, 0xc4, 0x90, 0x31, 0x09, 0x7d, + 0x92, 0x66, 0x89, 0x4c, 0x70, 0xa7, 0xe0, 0x12, 0x96, 0x72, 0x32, 0xcb, 0x25, 0x83, 0xf5, 0xce, + 0x27, 0x21, 0x97, 0x87, 0xf9, 0x01, 0x09, 0x92, 0xc8, 0x0b, 0x93, 0x30, 0xf1, 0x74, 0xc8, 0x41, + 0xfe, 0x44, 0x9f, 0xf4, 0x41, 0xff, 0x2a, 0xa4, 0x3a, 0xee, 0xac, 0x6d, 0x92, 0xc1, 0x05, 0x76, + 0x9d, 0xbb, 0x53, 0x4e, 0xc4, 0x82, 0x43, 0x1e, 0x43, 0x36, 0xf4, 0xd2, 0xa3, 0x50, 0x01, 0xc2, + 0x8b, 0x40, 0xb2, 0x8b, 0xa2, 0xbc, 0x45, 0x51, 0x59, 0x1e, 0x4b, 0x1e, 0xc1, 0xb9, 0x80, 0xcf, + 0xdf, 0x16, 0x20, 0x82, 0x43, 0x88, 0xd8, 0x7c, 0x9c, 0xfb, 0xd7, 0x12, 0x7a, 0x7f, 0x6b, 0xda, + 0x85, 0x5d, 0x1e, 0xc6, 0x3c, 0x0e, 0x29, 0xfc, 0x9a, 0x83, 0x90, 0xf8, 0x31, 0x6a, 0xaa, 0x0c, + 0xfb, 0x4c, 0x32, 0xdb, 0xea, 0x59, 0x6b, 0xed, 
0x8d, 0x4f, 0xc9, 0xb4, 0x7d, 0xa5, 0x11, 0x49, + 0x8f, 0x42, 0x05, 0x08, 0xa2, 0xd8, 0x64, 0xb0, 0x4e, 0x7e, 0x3c, 0x78, 0x0a, 0x81, 0x7c, 0x08, + 0x92, 0xf9, 0xf8, 0x78, 0xe4, 0x54, 0xc6, 0x23, 0x07, 0x4d, 0x31, 0x5a, 0xaa, 0xe2, 0x9f, 0x51, + 0x4d, 0xa4, 0x10, 0xd8, 0x4b, 0x5a, 0xfd, 0x4b, 0xb2, 0xf8, 0xe3, 0x90, 0x85, 0x69, 0xee, 0xa6, + 0x10, 0xf8, 0x2b, 0xc6, 0xa6, 0xa6, 0x4e, 0x54, 0x8b, 0xe2, 0x00, 0x35, 0x84, 0x64, 0x32, 0x17, + 0x76, 0x55, 0xcb, 0x7f, 0x75, 0x35, 0x79, 0x2d, 0xe1, 0xaf, 0x1a, 0x83, 0x46, 0x71, 0xa6, 0x46, + 0xda, 0x7d, 0x5d, 0x45, 0xee, 0xc2, 0xd8, 0xad, 0x24, 0xee, 0x73, 0xc9, 0x93, 0x18, 0x6f, 0xa2, + 0x9a, 0x1c, 0xa6, 0xa0, 0xdb, 0xd8, 0xf2, 0x3f, 0x98, 0x64, 0xbb, 0x37, 0x4c, 0xe1, 0x74, 0xe4, + 0xbc, 0x3b, 0xcf, 0x57, 0x38, 0xd5, 0x11, 0x78, 0xa7, 0xac, 0xa2, 0xa1, 0x63, 0xef, 0x9e, 0x4d, + 0xe4, 0x74, 0xe4, 0x5c, 0x30, 0x87, 0xa4, 0x54, 0x3a, 0x9b, 0x2e, 0xbe, 0x8d, 0x1a, 0x19, 0x30, + 0x91, 0xc4, 0xba, 0xe5, 0xad, 0x69, 0x59, 0x54, 0xa3, 0xd4, 0xdc, 0xe2, 0x8f, 0xd0, 0x72, 0x04, + 0x42, 0xb0, 0x10, 0x74, 0xf3, 0x5a, 0xfe, 0x35, 0x43, 0x5c, 0x7e, 0x58, 0xc0, 0x74, 0x72, 0x8f, + 0x9f, 0xa2, 0xd5, 0x67, 0x4c, 0xc8, 0xfd, 0xb4, 0xcf, 0x24, 0xec, 0xf1, 0x08, 0xec, 0x9a, 0x6e, + 0xf7, 0x9d, 0xcb, 0xcd, 0x8a, 0x8a, 0xf0, 0x6f, 0x1a, 0xf5, 0xd5, 0x9d, 0x33, 0x4a, 0x74, 0x4e, + 0x19, 0x0f, 0x10, 0x56, 0xc8, 0x5e, 0xc6, 0x62, 0x51, 0x34, 0x4a, 0xf9, 0xd5, 0xff, 0xb7, 0x5f, + 0xc7, 0xf8, 0xe1, 0x9d, 0x73, 0x6a, 0xf4, 0x02, 0x07, 0xf7, 0x6f, 0x0b, 0xdd, 0x5a, 0xf8, 0x95, + 0x77, 0xb8, 0x90, 0xf8, 0x97, 0x73, 0xbb, 0x42, 0x2e, 0x97, 0x8f, 0x8a, 0xd6, 0x9b, 0x72, 0xdd, + 0xe4, 0xd4, 0x9c, 0x20, 0x33, 0x7b, 0xf2, 0x08, 0xd5, 0xb9, 0x84, 0x48, 0xd8, 0x4b, 0xbd, 0xea, + 0x5a, 0x7b, 0xe3, 0xb3, 0x2b, 0x4d, 0xb2, 0xff, 0x8e, 0x71, 0xa8, 0x3f, 0x50, 0x5a, 0xb4, 0x90, + 0x74, 0xff, 0xac, 0xbd, 0xa1, 0x36, 0xb5, 0x4e, 0xf8, 0x43, 0xb4, 0x9c, 0x15, 0x47, 0x5d, 0xda, + 0x8a, 0xdf, 0x56, 0x83, 0x60, 0x18, 0x74, 0x72, 0x87, 0x37, 0x10, 0x12, 0x3c, 0x8c, 0x21, 0xfb, + 0x81, 0x45, 0x60, 0x2f, 0xeb, 0xb1, 0x29, 0xd7, 0x7f, 0xb7, 0xbc, 0xa1, 0x33, 0x2c, 0xbc, 0x85, + 0x6e, 0xc0, 0xf3, 0x94, 0x67, 0x4c, 0xcf, 0x2a, 0x04, 0x49, 0xdc, 0x17, 0x76, 0xb3, 0x67, 0xad, + 0xd5, 0xfd, 0xf7, 0xc6, 0x23, 0xe7, 0xc6, 0xf6, 0xfc, 0x25, 0x3d, 0xcf, 0xc7, 0x04, 0x35, 0x72, + 0x35, 0x8a, 0xc2, 0xae, 0xf7, 0xaa, 0x6b, 0x2d, 0xff, 0xa6, 0x1a, 0xe8, 0x7d, 0x8d, 0x9c, 0x8e, + 0x9c, 0xe6, 0xf7, 0x30, 0xd4, 0x07, 0x6a, 0x58, 0xf8, 0x63, 0xd4, 0xcc, 0x05, 0x64, 0xb1, 0x4a, + 0xb3, 0x58, 0x83, 0xb2, 0xf7, 0xfb, 0x06, 0xa7, 0x25, 0x03, 0xdf, 0x42, 0xd5, 0x9c, 0xf7, 0xcd, + 0x1a, 0xb4, 0x0d, 0xb1, 0xba, 0xff, 0xe0, 0x3e, 0x55, 0x38, 0x76, 0x51, 0x23, 0xcc, 0x92, 0x3c, + 0x15, 0x76, 0x4d, 0x9b, 0x23, 0x65, 0xfe, 0x9d, 0x46, 0xa8, 0xb9, 0xc1, 0x1c, 0xd5, 0xe1, 0xb9, + 0xcc, 0x98, 0xdd, 0xd0, 0x9f, 0xef, 0xfe, 0x95, 0xdf, 0x39, 0xb2, 0xad, 0x64, 0xb6, 0x63, 0x99, + 0x0d, 0xa7, 0x5f, 0x53, 0x63, 0xb4, 0x70, 0xe8, 0x3c, 0x46, 0x68, 0xca, 0xc1, 0xd7, 0x51, 0xf5, + 0x08, 0x86, 0xc5, 0xab, 0x43, 0xd5, 0x4f, 0xfc, 0x35, 0xaa, 0x0f, 0xd8, 0xb3, 0x1c, 0xcc, 0x93, + 0x7b, 0xfb, 0x4d, 0xa9, 0x68, 0xa1, 0x9f, 0x14, 0x9b, 0x16, 0x41, 0xf7, 0x96, 0x36, 0x2d, 0xf7, + 0xd8, 0x42, 0xce, 0x5b, 0x5e, 0x4b, 0x9c, 0x21, 0x14, 0x4c, 0x5e, 0x20, 0x61, 0x5b, 0xba, 0xea, + 0x6f, 0xae, 0x54, 0x75, 0xf9, 0x90, 0x4d, 0x47, 0xa9, 0x84, 0x04, 0x9d, 0x71, 0xc1, 0xeb, 0xa8, + 0x3d, 0xa3, 0xaa, 0xeb, 0x5b, 0xf1, 0xaf, 0x8d, 0x47, 0x4e, 0x7b, 0x46, 0x9c, 0xce, 0x72, 0xdc, + 0x2f, 0x4c, 0xb3, 0x74, 0x8d, 0xd8, 0x99, 0x2c, 0x99, 0xa5, 0x3f, 0x64, 
0x6b, 0x7e, 0x53, 0xee, + 0x35, 0x7f, 0xff, 0xc3, 0xa9, 0xbc, 0xf8, 0xa7, 0x57, 0xf1, 0xbf, 0x3d, 0x3e, 0xe9, 0x56, 0x5e, + 0x9e, 0x74, 0x2b, 0xaf, 0x4e, 0xba, 0x95, 0x17, 0xe3, 0xae, 0x75, 0x3c, 0xee, 0x5a, 0x2f, 0xc7, + 0x5d, 0xeb, 0xd5, 0xb8, 0x6b, 0xfd, 0x3b, 0xee, 0x5a, 0xbf, 0xbd, 0xee, 0x56, 0x1e, 0x75, 0x16, + 0xff, 0x2f, 0xf9, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x4a, 0x4f, 0xbc, 0xb4, 0x08, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto index 1db9e324e..dac7c7f5f 100644 --- a/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/certificates/v1"; // CertificateSigningRequest objects provide a mechanism to obtain x509 certificates // by submitting a certificate signing request, and having it asynchronously approved and issued. @@ -41,7 +41,7 @@ option go_package = "v1"; // or to obtain certificates from custom non-Kubernetes signers. message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -87,19 +87,19 @@ message CertificateSigningRequestCondition { // lastUpdateTime is the time of the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. // If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } // CertificateSigningRequestList is a collection of CertificateSigningRequest objects message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a collection of CertificateSigningRequest objects repeated CertificateSigningRequest items = 2; @@ -154,8 +154,6 @@ message CertificateSigningRequestSpec { // // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. // - // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. 
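For context on the expirationSeconds semantics described above: a minimal sketch against the vendored certificates/v1 types. The PEM content and signer choice are placeholders rather than values taken from this diff; as the comments above note, signers may issue a different validity than requested, so clients should check notBefore/notAfter on the issued certificate.

package main

import (
	"fmt"

	certificatesv1 "k8s.io/api/certificates/v1"
)

func main() {
	// Request roughly one hour of validity; 600 (10 minutes) is the minimum
	// accepted value for expirationSeconds.
	expiration := int32(3600)

	spec := certificatesv1.CertificateSigningRequestSpec{
		Request:           []byte("-----BEGIN CERTIFICATE REQUEST-----\n<placeholder PEM>\n-----END CERTIFICATE REQUEST-----\n"),
		SignerName:        "kubernetes.io/kube-apiserver-client",
		ExpirationSeconds: &expiration,
		Usages:            []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
	}
	fmt.Println(spec.SignerName, *spec.ExpirationSeconds)
}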
- // // +optional optional int32 expirationSeconds = 8; diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go index bcebb10c6..ba8009840 100644 --- a/vendor/k8s.io/api/certificates/v1/types.go +++ b/vendor/k8s.io/api/certificates/v1/types.go @@ -27,6 +27,7 @@ import ( // +genclient:nonNamespaced // +genclient:method=UpdateApproval,verb=update,subresource=approval,input=k8s.io/api/certificates/v1.CertificateSigningRequest,result=k8s.io/api/certificates/v1.CertificateSigningRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequest objects provide a mechanism to obtain x509 certificates // by submitting a certificate signing request, and having it asynchronously approved and issued. @@ -103,8 +104,6 @@ type CertificateSigningRequestSpec struct { // // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. // - // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. - // // +optional ExpirationSeconds *int32 `json:"expirationSeconds,omitempty" protobuf:"varint,8,opt,name=expirationSeconds"` @@ -264,6 +263,7 @@ type CertificateSigningRequestCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequestList is a collection of CertificateSigningRequest objects type CertificateSigningRequestList struct { @@ -276,8 +276,9 @@ type CertificateSigningRequestList struct { } // KeyUsage specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // +enum diff --git a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go index 2714584ed..4bdf39ebb 100644 --- a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ @@ -64,7 +64,7 @@ var map_CertificateSigningRequestSpec = map[string]string{ "": "CertificateSigningRequestSpec contains the certificate request.", "request": "request contains an x509 certificate signing request encoded in a \"CERTIFICATE REQUEST\" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.", "signerName": "signerName indicates the requested signer, and is a qualified name.\n\nList/watch requests for CertificateSigningRequests can filter on this field using a \"spec.signerName=NAME\" fieldSelector.\n\nWell-known Kubernetes signers are:\n 1. \"kubernetes.io/kube-apiserver-client\": issues client certificates that can be used to authenticate to kube-apiserver.\n Requests for this signer are never auto-approved by kube-controller-manager, can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 2. 
\"kubernetes.io/kube-apiserver-client-kubelet\": issues client certificates that kubelets use to authenticate to kube-apiserver.\n Requests for this signer can be auto-approved by the \"csrapproving\" controller in kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 3. \"kubernetes.io/kubelet-serving\" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n\nMore details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers\n\nCustom signerNames can also be specified. The signer defines:\n 1. Trust distribution: how trust (CA bundles) are distributed.\n 2. Permitted subjects: and behavior when a disallowed subject is requested.\n 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.\n 4. Required, permitted, or forbidden key usages / extended key usages.\n 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.\n 6. Whether or not requests for CA certificates are allowed.", - "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.\n\nAs of v1.22, this field is beta and is controlled via the CSRDuration feature gate.", + "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 
10 minutes.", "usages": "usages specifies a set of key usages requested in the issued certificate.\n\nRequests for TLS client certificates typically request: \"digital signature\", \"key encipherment\", \"client auth\".\n\nRequests for TLS serving certificates typically request: \"key encipherment\", \"digital signature\", \"server auth\".\n\nValid values are:\n \"signing\", \"digital signature\", \"content commitment\",\n \"key encipherment\", \"key agreement\", \"data encipherment\",\n \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\",\n \"server auth\", \"client auth\",\n \"code signing\", \"email protection\", \"s/mime\",\n \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\",\n \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"", "username": "username contains the name of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", "uid": "uid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", diff --git a/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..3a2b27403 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CertificateSigningRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CertificateSigningRequestList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/doc.go b/vendor/k8s.io/api/certificates/v1alpha1/doc.go new file mode 100644 index 000000000..d83d0e820 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=certificates.k8s.io + +package v1alpha1 // import "k8s.io/api/certificates/v1alpha1" diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go new file mode 100644 index 000000000..a62a40059 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go @@ -0,0 +1,831 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/certificates/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} } +func (*ClusterTrustBundle) ProtoMessage() {} +func (*ClusterTrustBundle) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{0} +} +func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundle.Merge(m, src) +} +func (m *ClusterTrustBundle) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundle) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo + +func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} } +func (*ClusterTrustBundleList) ProtoMessage() {} +func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{1} +} +func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleList.Merge(m, src) +} +func (m *ClusterTrustBundleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo + +func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} } +func (*ClusterTrustBundleSpec) ProtoMessage() {} +func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f73d5fe56c015bb8, []int{2} +} +func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src) +} +func (m *ClusterTrustBundleSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle") + proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList") + proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_f73d5fe56c015bb8) +} + +var fileDescriptor_f73d5fe56c015bb8 = []byte{ + // 437 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6a, 0xdb, 0x40, + 0x10, 0xc6, 0xb5, 0x69, 0x02, 0xc9, 0xba, 
0x85, 0xa2, 0x42, 0x31, 0x3e, 0x6c, 0x8c, 0x4f, 0xb9, + 0x74, 0x37, 0x36, 0x69, 0xc9, 0x59, 0x85, 0x42, 0xa1, 0x7f, 0x40, 0xe9, 0xa5, 0xa1, 0x87, 0xae, + 0xd7, 0x13, 0x79, 0x6b, 0x4b, 0x5a, 0x76, 0x57, 0x86, 0xde, 0x0a, 0x7d, 0x81, 0x3e, 0x96, 0x8f, + 0x69, 0x4f, 0x39, 0x85, 0x5a, 0x7d, 0x91, 0xb2, 0x6b, 0xd9, 0x12, 0x55, 0x8b, 0x4b, 0x6e, 0x9a, + 0xd1, 0xfc, 0xbe, 0x6f, 0xbe, 0x11, 0xc2, 0xa7, 0xb3, 0x73, 0x43, 0x65, 0xce, 0xb8, 0x92, 0x4c, + 0x80, 0xb6, 0xf2, 0x4a, 0x0a, 0x6e, 0xc1, 0xb0, 0xc5, 0x90, 0xcf, 0xd5, 0x94, 0x0f, 0x59, 0x02, + 0x19, 0x68, 0x6e, 0x61, 0x42, 0x95, 0xce, 0x6d, 0x1e, 0xf6, 0xd7, 0x04, 0xe5, 0x4a, 0xd2, 0x26, + 0x41, 0x37, 0x44, 0xef, 0x49, 0x22, 0xed, 0xb4, 0x18, 0x53, 0x91, 0xa7, 0x2c, 0xc9, 0x93, 0x9c, + 0x79, 0x70, 0x5c, 0x5c, 0xf9, 0xca, 0x17, 0xfe, 0x69, 0x2d, 0xd8, 0x3b, 0xab, 0x57, 0x48, 0xb9, + 0x98, 0xca, 0x0c, 0xf4, 0x67, 0xa6, 0x66, 0x89, 0x6b, 0x18, 0x96, 0x82, 0xe5, 0x6c, 0xd1, 0x5a, + 0xa3, 0xc7, 0xfe, 0x45, 0xe9, 0x22, 0xb3, 0x32, 0x85, 0x16, 0xf0, 0x6c, 0x17, 0x60, 0xc4, 0x14, + 0x52, 0xfe, 0x27, 0x37, 0xf8, 0x81, 0x70, 0xf8, 0x7c, 0x5e, 0x18, 0x0b, 0xfa, 0x9d, 0x2e, 0x8c, + 0x8d, 0x8a, 0x6c, 0x32, 0x87, 0xf0, 0x23, 0x3e, 0x74, 0xab, 0x4d, 0xb8, 0xe5, 0x5d, 0xd4, 0x47, + 0x27, 0x9d, 0xd1, 0x29, 0xad, 0x2f, 0xb3, 0x75, 0xa0, 0x6a, 0x96, 0xb8, 0x86, 0xa1, 0x6e, 0x9a, + 0x2e, 0x86, 0xf4, 0xed, 0xf8, 0x13, 0x08, 0xfb, 0x1a, 0x2c, 0x8f, 0xc2, 0xe5, 0xed, 0x71, 0x50, + 0xde, 0x1e, 0xe3, 0xba, 0x17, 0x6f, 0x55, 0xc3, 0x4b, 0xbc, 0x6f, 0x14, 0x88, 0xee, 0x9e, 0x57, + 0x3f, 0xa7, 0xbb, 0xee, 0x4e, 0xdb, 0x5b, 0x5e, 0x28, 0x10, 0xd1, 0xfd, 0xca, 0x65, 0xdf, 0x55, + 0xb1, 0xd7, 0x1c, 0x7c, 0x47, 0xf8, 0x71, 0x7b, 0xfc, 0x95, 0x34, 0x36, 0xfc, 0xd0, 0x0a, 0x46, + 0xff, 0x2f, 0x98, 0xa3, 0x7d, 0xac, 0x87, 0x95, 0xe1, 0xe1, 0xa6, 0xd3, 0x08, 0xf5, 0x1e, 0x1f, + 0x48, 0x0b, 0xa9, 0xe9, 0xee, 0xf5, 0xef, 0x9d, 0x74, 0x46, 0x67, 0x77, 0x49, 0x15, 0x3d, 0xa8, + 0x0c, 0x0e, 0x5e, 0x3a, 0xa9, 0x78, 0xad, 0x38, 0xf8, 0xfa, 0xd7, 0x4c, 0x2e, 0x74, 0x38, 0xc2, + 0xd8, 0xc8, 0x24, 0x03, 0xfd, 0x86, 0xa7, 0xe0, 0x53, 0x1d, 0xd5, 0xc7, 0xbf, 0xd8, 0xbe, 0x89, + 0x1b, 0x53, 0xe1, 0x53, 0xdc, 0xb1, 0xb5, 0x8c, 0xff, 0x0a, 0x47, 0xd1, 0xa3, 0x0a, 0xea, 0x34, + 0x1c, 0xe2, 0xe6, 0x5c, 0xf4, 0x62, 0xb9, 0x22, 0xc1, 0xf5, 0x8a, 0x04, 0x37, 0x2b, 0x12, 0x7c, + 0x29, 0x09, 0x5a, 0x96, 0x04, 0x5d, 0x97, 0x04, 0xdd, 0x94, 0x04, 0xfd, 0x2c, 0x09, 0xfa, 0xf6, + 0x8b, 0x04, 0x97, 0xfd, 0x5d, 0xbf, 0xdd, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x1c, 0xcb, + 0xdd, 0x99, 0x03, 0x00, 0x00, +} + +func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterTrustBundleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TrustBundle) + copy(dAtA[i:], m.TrustBundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TrustBundle))) + i-- + dAtA[i] = 0x12 + i -= len(m.SignerName) + copy(dAtA[i:], m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterTrustBundle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterTrustBundleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterTrustBundleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TrustBundle) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterTrustBundle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundle{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterTrustBundle{" + for _, f := range this.Items { 
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterTrustBundleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterTrustBundleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleSpec{`, + `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`, + `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterTrustBundle{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustBundle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto new file mode 100644 index 000000000..7155f778c --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.certificates.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/certificates/v1alpha1"; +
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +message ClusterTrustBundle { + // metadata contains the object metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the signer (if any) and trust anchors. + optional ClusterTrustBundleSpec spec = 2; +} +
+// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +message ClusterTrustBundleList { + // metadata contains the list metadata. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a collection of ClusterTrustBundle objects + repeated ClusterTrustBundle items = 2; +} +
+// ClusterTrustBundleSpec contains the signer and trust anchors. +message ClusterTrustBundleSpec { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName=<the signer name> + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + optional string signerName = 1; + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. + // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers.
+ // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data. + optional string trustBundle = 2; +} + diff --git a/vendor/k8s.io/api/certificates/v1alpha1/register.go b/vendor/k8s.io/api/certificates/v1alpha1/register.go new file mode 100644 index 000000000..7288ed9a3 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + localSchemeBuilder = &SchemeBuilder + + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterTrustBundle{}, + &ClusterTrustBundleList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types.go b/vendor/k8s.io/api/certificates/v1alpha1/types.go new file mode 100644 index 000000000..1a9fda011 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors +// (root certificates). +// +// ClusterTrustBundle objects are considered to be readable by any authenticated +// user in the cluster, because they can be mounted by pods using the +// `clusterTrustBundle` projection. All service accounts have read access to +// ClusterTrustBundles by default. Users who only have namespace-level access +// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount +// that they have access to. +// +// It can be optionally associated with a particular assigner, in which case it +// contains one valid set of trust anchors for that signer. Signers may have +// multiple associated ClusterTrustBundles; each is an independent set of trust +// anchors for that signer. Admission control is used to enforce that only users +// with permissions on the signer can create or modify the corresponding bundle. +type ClusterTrustBundle struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the signer (if any) and trust anchors. + Spec ClusterTrustBundleSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} +
+// ClusterTrustBundleSpec contains the signer and trust anchors. +type ClusterTrustBundleSpec struct { + // signerName indicates the associated signer, if any. + // + // In order to create or update a ClusterTrustBundle that sets signerName, + // you must have the following cluster-scoped permission: + // group=certificates.k8s.io resource=signers resourceName=<the signer name> + // verb=attest. + // + // If signerName is not empty, then the ClusterTrustBundle object must be + // named with the signer name as a prefix (translating slashes to colons). + // For example, for the signer name `example.com/foo`, valid + // ClusterTrustBundle object names include `example.com:foo:abc` and + // `example.com:foo:v1`. + // + // If signerName is empty, then the ClusterTrustBundle object's name must + // not have such a prefix. + // + // List/watch requests for ClusterTrustBundles can filter on this field + // using a `spec.signerName=NAME` field selector. + // + // +optional + SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,opt,name=signerName"` + + // trustBundle contains the individual X.509 trust anchors for this + // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates. + // + // The data must consist only of PEM certificate blocks that parse as valid + // X.509 certificates. Each certificate must include a basic constraints + // extension with the CA bit set. The API server will reject objects that + // contain duplicate certificates, or that use PEM block headers. + // + // Users of ClusterTrustBundles, including Kubelet, are free to reorder and + // deduplicate certificate blocks in this file according to their own logic, + // as well as to drop PEM block headers and inter-block data.
+ TrustBundle string `json:"trustBundle" protobuf:"bytes,2,opt,name=trustBundle"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterTrustBundleList is a collection of ClusterTrustBundle objects +type ClusterTrustBundleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata contains the list metadata. + // + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a collection of ClusterTrustBundle objects + Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..bff649e3c --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,60 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterTrustBundle = map[string]string{ + "": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular assigner, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. 
Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.", + "metadata": "metadata contains the object metadata.", + "spec": "spec contains the signer (if any) and trust anchors.", +} + +func (ClusterTrustBundle) SwaggerDoc() map[string]string { + return map_ClusterTrustBundle +} +
+var map_ClusterTrustBundleList = map[string]string{ + "": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects", + "metadata": "metadata contains the list metadata.", + "items": "items is a collection of ClusterTrustBundle objects", +} + +func (ClusterTrustBundleList) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleList +} +
+var map_ClusterTrustBundleSpec = map[string]string{ + "": "ClusterTrustBundleSpec contains the signer and trust anchors.", + "signerName": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=<the signer name> verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.", + "trustBundle": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.", +} + +func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..30a4dc1e8 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,102 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT.
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundle) DeepCopyInto(out *ClusterTrustBundle) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundle. +func (in *ClusterTrustBundle) DeepCopy() *ClusterTrustBundle { + if in == nil { + return nil + } + out := new(ClusterTrustBundle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundle) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleList) DeepCopyInto(out *ClusterTrustBundleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterTrustBundle, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleList. +func (in *ClusterTrustBundleList) DeepCopy() *ClusterTrustBundleList { + if in == nil { + return nil + } + out := new(ClusterTrustBundleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterTrustBundleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleSpec) DeepCopyInto(out *ClusterTrustBundleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleSpec. +func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec { + if in == nil { + return nil + } + out := new(ClusterTrustBundleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..dfafa656c --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. 
+ +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index d9a297644..b6d8ab3f5 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto +// source: k8s.io/api/certificates/v1beta1/generated.proto package v1beta1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{0} + return fileDescriptor_6529c11a462c48a5, []int{0} } func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{1} + return fileDescriptor_6529c11a462c48a5, []int{1} } func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{2} + return fileDescriptor_6529c11a462c48a5, []int{2} } func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{3} + return fileDescriptor_6529c11a462c48a5, []int{3} } func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{4} + return fileDescriptor_6529c11a462c48a5, []int{4} } func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo func (m *ExtraValue) Reset() { *m = ExtraValue{} } func (*ExtraValue) ProtoMessage() {} func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{5} + return fileDescriptor_6529c11a462c48a5, []int{5} } func (m *ExtraValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -225,68 +225,68 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) + proto.RegisterFile("k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_6529c11a462c48a5) } -var fileDescriptor_09d156762b8218ef = []byte{ - // 912 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x6e, 0x1b, 0x45, - 0x14, 0xf6, 0xc6, 0xff, 0xe3, 0x92, 0xb6, 0x23, 0xa8, 0x16, 0x4b, 0xf5, 0x5a, 0x16, 0xa0, 0xf0, - 0xd3, 0x59, 0x52, 0x55, 0x10, 0xe5, 0x02, 0xc1, 0x86, 0x08, 0x22, 0x52, 0x90, 0x26, 0x09, 0x17, - 0x08, 0x89, 0x8e, 0xd7, 0xa7, 0x9b, 0xa9, 0xbb, 0x3f, 0xec, 0xcc, 0x9a, 0xfa, 0xae, 0x8f, 0xc0, - 0x25, 0x97, 0xbc, 0x03, 0x2f, 0x11, 0x2e, 0x90, 0x7a, 0xd9, 0x0b, 0x64, 0x11, 0xf7, 0x2d, 0x72, - 0x85, 0x66, 0x76, 0xbc, 0x76, 0xec, 0x84, 0x94, 0xf6, 0xce, 0xf3, 0xcd, 0xf9, 0xbe, 0xef, 0x9c, - 0xb3, 0xe7, 0x8c, 0xd1, 0xd7, 0xc3, 0x2d, 0x41, 0x78, 0xec, 0x0e, 0xb3, 0x3e, 0xa4, 0x11, 0x48, - 0x10, 0xee, 0x08, 0xa2, 0x41, 0x9c, 0xba, 0xe6, 0x82, 0x25, 0xdc, 0xf5, 0x21, 0x95, 0xfc, 0x21, - 0xf7, 0x99, 0xbe, 0xde, 0xec, 0x83, 0x64, 0x9b, 0x6e, 0x00, 0x11, 0xa4, 0x4c, 0xc2, 0x80, 0x24, - 0x69, 0x2c, 0x63, 0xec, 0xe4, 0x04, 0xc2, 0x12, 0x4e, 0x16, 0x09, 0xc4, 0x10, 0xda, 0x77, 0x02, - 0x2e, 0x8f, 0xb3, 0x3e, 0xf1, 0xe3, 0xd0, 0x0d, 0xe2, 0x20, 0x76, 0x35, 0xaf, 0x9f, 0x3d, 0xd4, - 0x27, 0x7d, 0xd0, 0xbf, 0x72, 0xbd, 0x76, 0x6f, 0x31, 0x81, 0x38, 0x05, 0x77, 0xb4, 0xe2, 0xd9, - 0xbe, 0x37, 0x8f, 0x09, 0x99, 0x7f, 0xcc, 0x23, 0x48, 0xc7, 0x6e, 0x32, 0x0c, 0x14, 0x20, 0xdc, - 0x10, 0x24, 0xbb, 0x88, 0xe5, 0x5e, 0xc6, 0x4a, 0xb3, 0x48, 0xf2, 0x10, 0x56, 0x08, 0x9f, 0x5c, - 0x45, 0x10, 0xfe, 0x31, 0x84, 0x6c, 0x99, 0xd7, 0xfb, 0x73, 0x0d, 0xbd, 0xbd, 0x33, 0x6f, 0xc5, - 0x01, 0x0f, 0x22, 0x1e, 0x05, 0x14, 0x7e, 0xce, 0x40, 0x48, 0xfc, 0x00, 0x35, 0x54, 0x86, 0x03, - 0x26, 0x99, 0x6d, 0x75, 0xad, 0x8d, 0xd6, 0xdd, 0x8f, 0xc9, 0xbc, 0x87, 0x85, 0x11, 0x49, 0x86, - 0x81, 0x02, 0x04, 0x51, 0xd1, 0x64, 0xb4, 0x49, 0xbe, 0xeb, 0x3f, 0x02, 0x5f, 0xde, 0x07, 0xc9, - 0x3c, 0x7c, 0x32, 0x71, 0x4a, 0xd3, 0x89, 0x83, 0xe6, 0x18, 0x2d, 0x54, 0xf1, 0x03, 0x54, 0x11, - 0x09, 0xf8, 0xf6, 0x9a, 0x56, 0xff, 0x8c, 0x5c, 0xf1, 0x85, 0xc8, 0xa5, 0xb9, 0x1e, 0x24, 0xe0, - 0x7b, 0xd7, 0x8c, 0x57, 0x45, 0x9d, 0xa8, 0x56, 0xc6, 0xc7, 0xa8, 0x26, 0x24, 0x93, 0x99, 0xb0, - 0xcb, 0xda, 0xe3, 0xf3, 0xd7, 0xf0, 0xd0, 0x3a, 0xde, 0xba, 0x71, 0xa9, 0xe5, 0x67, 0x6a, 0xf4, - 0x7b, 0x2f, 0xca, 0xa8, 0x77, 0x29, 0x77, 0x27, 0x8e, 0x06, 0x5c, 0xf2, 0x38, 0xc2, 0x5b, 0xa8, - 0x22, 0xc7, 0x09, 0xe8, 0x86, 0x36, 0xbd, 0x77, 0x66, 0x29, 0x1f, 0x8e, 0x13, 0x38, 0x9b, 0x38, - 0x6f, 0x2e, 0xc7, 0x2b, 0x9c, 0x6a, 0x06, 0xde, 0x2f, 0x4a, 0xa9, 0x69, 0xee, 0xbd, 0xf3, 0x89, - 0x9c, 0x4d, 0x9c, 0x0b, 0x26, 0x92, 0x14, 0x4a, 0xe7, 0xd3, 0xc5, 0xef, 0xa1, 0x5a, 0x0a, 0x4c, - 0xc4, 0x91, 0x6e, 0x7e, 0x73, 0x5e, 0x16, 0xd5, 0x28, 0x35, 0xb7, 0xf8, 0x7d, 0x54, 0x0f, 0x41, - 0x08, 0x16, 0x80, 0xee, 0x60, 0xd3, 0xbb, 0x6e, 0x02, 0xeb, 0xf7, 0x73, 0x98, 0xce, 0xee, 0xf1, - 0x23, 0xb4, 0xfe, 0x98, 0x09, 0x79, 0x94, 0x0c, 0x98, 0x84, 0x43, 0x1e, 0x82, 0x5d, 0xd1, 0x3d, - 0xff, 0xe0, 0xe5, 0xa6, 0x46, 0x31, 0xbc, 0x5b, 0x46, 0x7d, 0x7d, 0xff, 0x9c, 0x12, 0x5d, 0x52, - 0xc6, 0x23, 0x84, 0x15, 0x72, 0x98, 0xb2, 0x48, 0xe4, 0x8d, 0x52, 0x7e, 0xd5, 0xff, 0xed, 0xd7, - 0x36, 0x7e, 0x78, 0x7f, 0x45, 0x8d, 0x5e, 0xe0, 0xd0, 0x9b, 0x58, 0xe8, 0xf6, 0xa5, 0x5f, 0x79, - 0x9f, 0x0b, 0x89, 0x7f, 0x5c, 0xd9, 0x1a, 0xf2, 0x72, 0xf9, 0x28, 0xb6, 0xde, 0x99, 0x1b, 0x26, - 0xa7, 0xc6, 0x0c, 0x59, 0xd8, 0x98, 0x9f, 0x50, 0x95, 0x4b, 0x08, 0x85, 0xbd, 0xd6, 0x2d, 0x6f, - 0xb4, 0xee, 0x6e, 0xbf, 0xfa, 0x38, 0x7b, 0x6f, 0x18, 0x9b, 0xea, 0x9e, 0x12, 0xa4, 0xb9, 0x6e, - 0xef, 0x8f, 0xca, 0x7f, 0x14, 0xa8, 0x16, 0x0b, 0xbf, 0x8b, 0xea, 0x69, 0x7e, 0xd4, 0xf5, 0x5d, - 0xf3, 0x5a, 0x6a, 0x1a, 0x4c, 0x04, 0x9d, 0xdd, 0x61, 0x82, 
0x90, 0xe0, 0x41, 0x04, 0xe9, 0xb7, - 0x2c, 0x04, 0xbb, 0x9e, 0x0f, 0x99, 0x7a, 0x09, 0x0e, 0x0a, 0x94, 0x2e, 0x44, 0xe0, 0x1d, 0x74, - 0x13, 0x9e, 0x24, 0x3c, 0x65, 0x7a, 0x58, 0xc1, 0x8f, 0xa3, 0x81, 0xb0, 0x1b, 0x5d, 0x6b, 0xa3, - 0xea, 0xbd, 0x35, 0x9d, 0x38, 0x37, 0x77, 0x97, 0x2f, 0xe9, 0x6a, 0x3c, 0x26, 0xa8, 0x96, 0xa9, - 0x59, 0x14, 0x76, 0xb5, 0x5b, 0xde, 0x68, 0x7a, 0xb7, 0xd4, 0x44, 0x1f, 0x69, 0xe4, 0x6c, 0xe2, - 0x34, 0xbe, 0x81, 0xb1, 0x3e, 0x50, 0x13, 0x85, 0x3f, 0x42, 0x8d, 0x4c, 0x40, 0x1a, 0xa9, 0x14, - 0xf3, 0x3d, 0x28, 0x9a, 0x7f, 0x64, 0x70, 0x5a, 0x44, 0xe0, 0xdb, 0xa8, 0x9c, 0xf1, 0x81, 0xd9, - 0x83, 0x96, 0x09, 0x2c, 0x1f, 0xed, 0x7d, 0x49, 0x15, 0x8e, 0x7b, 0xa8, 0x16, 0xa4, 0x71, 0x96, - 0x08, 0xbb, 0xa2, 0xcd, 0x91, 0x32, 0xff, 0x4a, 0x23, 0xd4, 0xdc, 0xe0, 0x08, 0x55, 0xe1, 0x89, - 0x4c, 0x99, 0x5d, 0xd3, 0xdf, 0x6f, 0xef, 0xf5, 0x9e, 0x3c, 0xb2, 0xab, 0xb4, 0x76, 0x23, 0x99, - 0x8e, 0xe7, 0x9f, 0x53, 0x63, 0x34, 0xb7, 0x69, 0x03, 0x42, 0xf3, 0x18, 0x7c, 0x03, 0x95, 0x87, - 0x30, 0xce, 0xdf, 0x1e, 0xaa, 0x7e, 0xe2, 0x2f, 0x50, 0x75, 0xc4, 0x1e, 0x67, 0x60, 0x9e, 0xe0, - 0x0f, 0xaf, 0xcc, 0x47, 0xab, 0x7d, 0xaf, 0x28, 0x34, 0x67, 0x6e, 0xaf, 0x6d, 0x59, 0xbd, 0xbf, - 0x2c, 0xe4, 0x5c, 0xf1, 0x70, 0xe2, 0x5f, 0x10, 0xf2, 0x67, 0x8f, 0x91, 0xb0, 0x2d, 0x5d, 0xff, - 0xce, 0xab, 0xd7, 0x5f, 0x3c, 0x6c, 0xf3, 0xff, 0x98, 0x02, 0x12, 0x74, 0xc1, 0x0a, 0x6f, 0xa2, - 0xd6, 0x82, 0xb4, 0xae, 0xf4, 0x9a, 0x77, 0x7d, 0x3a, 0x71, 0x5a, 0x0b, 0xe2, 0x74, 0x31, 0xa6, - 0xf7, 0xa9, 0x69, 0x9b, 0x2e, 0x14, 0x3b, 0xb3, 0xa5, 0xb3, 0xf4, 0x77, 0x6d, 0x2e, 0x2f, 0xcd, - 0x76, 0xe3, 0xb7, 0xdf, 0x9d, 0xd2, 0xd3, 0xbf, 0xbb, 0x25, 0xef, 0xce, 0xc9, 0x69, 0xa7, 0xf4, - 0xec, 0xb4, 0x53, 0x7a, 0x7e, 0xda, 0x29, 0x3d, 0x9d, 0x76, 0xac, 0x93, 0x69, 0xc7, 0x7a, 0x36, - 0xed, 0x58, 0xcf, 0xa7, 0x1d, 0xeb, 0x9f, 0x69, 0xc7, 0xfa, 0xf5, 0x45, 0xa7, 0xf4, 0x43, 0xdd, - 0x54, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x95, 0x4b, 0xd3, 0xe2, 0xde, 0x08, 0x00, 0x00, +var fileDescriptor_6529c11a462c48a5 = []byte{ + // 901 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xf6, 0xc6, 0x1f, 0xb1, 0xc7, 0x21, 0x6d, 0x47, 0x50, 0x2d, 0x96, 0xea, 0xb5, 0x56, 0x80, + 0xc2, 0xd7, 0x2c, 0xa9, 0x2a, 0x88, 0x72, 0x40, 0xb0, 0x21, 0x42, 0x11, 0x29, 0x48, 0x93, 0x84, + 0x03, 0x42, 0xa2, 0x93, 0xf5, 0xdb, 0xcd, 0x34, 0xdd, 0x0f, 0x76, 0x66, 0x4d, 0x7d, 0xeb, 0x4f, + 0xe0, 0xc8, 0x91, 0xff, 0xc0, 0x9f, 0x08, 0x07, 0xa4, 0x1e, 0x7b, 0x40, 0x16, 0x71, 0xff, 0x45, + 0x4e, 0x68, 0x66, 0xc7, 0x6b, 0xc7, 0x4e, 0x70, 0x69, 0x6f, 0x3b, 0xcf, 0xbc, 0xcf, 0xf3, 0xbc, + 0xf3, 0xce, 0xfb, 0x8e, 0x8d, 0xbc, 0xd3, 0x2d, 0x41, 0x78, 0xe2, 0xb1, 0x94, 0x7b, 0x01, 0x64, + 0x92, 0x3f, 0xe4, 0x01, 0x93, 0x20, 0xbc, 0xc1, 0xe6, 0x31, 0x48, 0xb6, 0xe9, 0x85, 0x10, 0x43, + 0xc6, 0x24, 0xf4, 0x49, 0x9a, 0x25, 0x32, 0xc1, 0x4e, 0x41, 0x20, 0x2c, 0xe5, 0x64, 0x96, 0x40, + 0x0c, 0xa1, 0xf3, 0x71, 0xc8, 0xe5, 0x49, 0x7e, 0x4c, 0x82, 0x24, 0xf2, 0xc2, 0x24, 0x4c, 0x3c, + 0xcd, 0x3b, 0xce, 0x1f, 0xea, 0x95, 0x5e, 0xe8, 0xaf, 0x42, 0xaf, 0xe3, 0xce, 0x26, 0x90, 0x64, + 0xe0, 0x0d, 0x16, 0x3c, 0x3b, 0xf7, 0xa6, 0x31, 0x11, 0x0b, 0x4e, 0x78, 0x0c, 0xd9, 0xd0, 0x4b, + 0x4f, 0x43, 0x05, 0x08, 0x2f, 0x02, 0xc9, 0xae, 0x62, 0x79, 0xd7, 0xb1, 0xb2, 0x3c, 0x96, 0x3c, + 0x82, 0x05, 0xc2, 0xa7, 0xcb, 0x08, 0x22, 0x38, 0x81, 0x88, 0xcd, 0xf3, 0xdc, 0x3f, 0x57, 0xd0, + 0xdb, 0x3b, 0xd3, 0x52, 0x1c, 0xf0, 0x30, 0xe6, 0x71, 0x48, 0xe1, 0xe7, 0x1c, 0x84, 0xc4, 
0x0f, + 0x50, 0x53, 0x65, 0xd8, 0x67, 0x92, 0xd9, 0x56, 0xcf, 0xda, 0x68, 0xdf, 0xfd, 0x84, 0x4c, 0x6b, + 0x58, 0x1a, 0x91, 0xf4, 0x34, 0x54, 0x80, 0x20, 0x2a, 0x9a, 0x0c, 0x36, 0xc9, 0x77, 0xc7, 0x8f, + 0x20, 0x90, 0xf7, 0x41, 0x32, 0x1f, 0x9f, 0x8d, 0x9c, 0xca, 0x78, 0xe4, 0xa0, 0x29, 0x46, 0x4b, + 0x55, 0xfc, 0x00, 0xd5, 0x44, 0x0a, 0x81, 0xbd, 0xa2, 0xd5, 0x3f, 0x27, 0x4b, 0x6e, 0x88, 0x5c, + 0x9b, 0xeb, 0x41, 0x0a, 0x81, 0xbf, 0x66, 0xbc, 0x6a, 0x6a, 0x45, 0xb5, 0x32, 0x3e, 0x41, 0x0d, + 0x21, 0x99, 0xcc, 0x85, 0x5d, 0xd5, 0x1e, 0x5f, 0xbc, 0x86, 0x87, 0xd6, 0xf1, 0xd7, 0x8d, 0x4b, + 0xa3, 0x58, 0x53, 0xa3, 0xef, 0xbe, 0xa8, 0x22, 0xf7, 0x5a, 0xee, 0x4e, 0x12, 0xf7, 0xb9, 0xe4, + 0x49, 0x8c, 0xb7, 0x50, 0x4d, 0x0e, 0x53, 0xd0, 0x05, 0x6d, 0xf9, 0xef, 0x4c, 0x52, 0x3e, 0x1c, + 0xa6, 0x70, 0x31, 0x72, 0xde, 0x9c, 0x8f, 0x57, 0x38, 0xd5, 0x0c, 0xbc, 0x5f, 0x1e, 0xa5, 0xa1, + 0xb9, 0xf7, 0x2e, 0x27, 0x72, 0x31, 0x72, 0xae, 0xe8, 0x48, 0x52, 0x2a, 0x5d, 0x4e, 0x17, 0xbf, + 0x87, 0x1a, 0x19, 0x30, 0x91, 0xc4, 0xba, 0xf8, 0xad, 0xe9, 0xb1, 0xa8, 0x46, 0xa9, 0xd9, 0xc5, + 0xef, 0xa3, 0xd5, 0x08, 0x84, 0x60, 0x21, 0xe8, 0x0a, 0xb6, 0xfc, 0x1b, 0x26, 0x70, 0xf5, 0x7e, + 0x01, 0xd3, 0xc9, 0x3e, 0x7e, 0x84, 0xd6, 0x1f, 0x33, 0x21, 0x8f, 0xd2, 0x3e, 0x93, 0x70, 0xc8, + 0x23, 0xb0, 0x6b, 0xba, 0xe6, 0x1f, 0xbc, 0x5c, 0xd7, 0x28, 0x86, 0x7f, 0xdb, 0xa8, 0xaf, 0xef, + 0x5f, 0x52, 0xa2, 0x73, 0xca, 0x78, 0x80, 0xb0, 0x42, 0x0e, 0x33, 0x16, 0x8b, 0xa2, 0x50, 0xca, + 0xaf, 0xfe, 0xbf, 0xfd, 0x3a, 0xc6, 0x0f, 0xef, 0x2f, 0xa8, 0xd1, 0x2b, 0x1c, 0xdc, 0x91, 0x85, + 0xee, 0x5c, 0x7b, 0xcb, 0xfb, 0x5c, 0x48, 0xfc, 0xe3, 0xc2, 0xd4, 0x90, 0x97, 0xcb, 0x47, 0xb1, + 0xf5, 0xcc, 0xdc, 0x34, 0x39, 0x35, 0x27, 0xc8, 0xcc, 0xc4, 0xfc, 0x84, 0xea, 0x5c, 0x42, 0x24, + 0xec, 0x95, 0x5e, 0x75, 0xa3, 0x7d, 0x77, 0xfb, 0xd5, 0xdb, 0xd9, 0x7f, 0xc3, 0xd8, 0xd4, 0xf7, + 0x94, 0x20, 0x2d, 0x74, 0xdd, 0x3f, 0x6a, 0xff, 0x71, 0x40, 0x35, 0x58, 0xf8, 0x5d, 0xb4, 0x9a, + 0x15, 0x4b, 0x7d, 0xbe, 0x35, 0xbf, 0xad, 0xba, 0xc1, 0x44, 0xd0, 0xc9, 0x1e, 0x26, 0x08, 0x09, + 0x1e, 0xc6, 0x90, 0x7d, 0xcb, 0x22, 0xb0, 0x57, 0x8b, 0x26, 0x53, 0x2f, 0xc1, 0x41, 0x89, 0xd2, + 0x99, 0x08, 0xbc, 0x83, 0x6e, 0xc1, 0x93, 0x94, 0x67, 0x4c, 0x37, 0x2b, 0x04, 0x49, 0xdc, 0x17, + 0x76, 0xb3, 0x67, 0x6d, 0xd4, 0xfd, 0xb7, 0xc6, 0x23, 0xe7, 0xd6, 0xee, 0xfc, 0x26, 0x5d, 0x8c, + 0xc7, 0x04, 0x35, 0x72, 0xd5, 0x8b, 0xc2, 0xae, 0xf7, 0xaa, 0x1b, 0x2d, 0xff, 0xb6, 0xea, 0xe8, + 0x23, 0x8d, 0x5c, 0x8c, 0x9c, 0xe6, 0x37, 0x30, 0xd4, 0x0b, 0x6a, 0xa2, 0xf0, 0x47, 0xa8, 0x99, + 0x0b, 0xc8, 0x62, 0x95, 0x62, 0x31, 0x07, 0x65, 0xf1, 0x8f, 0x0c, 0x4e, 0xcb, 0x08, 0x7c, 0x07, + 0x55, 0x73, 0xde, 0x37, 0x73, 0xd0, 0x36, 0x81, 0xd5, 0xa3, 0xbd, 0xaf, 0xa8, 0xc2, 0xb1, 0x8b, + 0x1a, 0x61, 0x96, 0xe4, 0xa9, 0xb0, 0x6b, 0xda, 0x1c, 0x29, 0xf3, 0xaf, 0x35, 0x42, 0xcd, 0x0e, + 0x8e, 0x51, 0x1d, 0x9e, 0xc8, 0x8c, 0xd9, 0x0d, 0x7d, 0x7f, 0x7b, 0xaf, 0xf7, 0xe4, 0x91, 0x5d, + 0xa5, 0xb5, 0x1b, 0xcb, 0x6c, 0x38, 0xbd, 0x4e, 0x8d, 0xd1, 0xc2, 0xa6, 0x03, 0x08, 0x4d, 0x63, + 0xf0, 0x4d, 0x54, 0x3d, 0x85, 0x61, 0xf1, 0xf6, 0x50, 0xf5, 0x89, 0xbf, 0x44, 0xf5, 0x01, 0x7b, + 0x9c, 0x83, 0x79, 0x82, 0x3f, 0x5c, 0x9a, 0x8f, 0x56, 0xfb, 0x5e, 0x51, 0x68, 0xc1, 0xdc, 0x5e, + 0xd9, 0xb2, 0xdc, 0xbf, 0x2c, 0xe4, 0x2c, 0x79, 0x38, 0xf1, 0x2f, 0x08, 0x05, 0x93, 0xc7, 0x48, + 0xd8, 0x96, 0x3e, 0xff, 0xce, 0xab, 0x9f, 0xbf, 0x7c, 0xd8, 0xa6, 0xbf, 0x31, 0x25, 0x24, 0xe8, + 0x8c, 0x15, 0xde, 0x44, 0xed, 0x19, 0x69, 0x7d, 0xd2, 0x35, 0xff, 0xc6, 0x78, 0xe4, 0xb4, 0x67, + 0xc4, 0xe9, 0x6c, 
0x8c, 0xfb, 0x99, 0x29, 0x9b, 0x3e, 0x28, 0x76, 0x26, 0x43, 0x67, 0xe9, 0x7b, + 0x6d, 0xcd, 0x0f, 0xcd, 0x76, 0xf3, 0xb7, 0xdf, 0x9d, 0xca, 0xd3, 0xbf, 0x7b, 0x15, 0x7f, 0xf7, + 0xec, 0xbc, 0x5b, 0x79, 0x76, 0xde, 0xad, 0x3c, 0x3f, 0xef, 0x56, 0x9e, 0x8e, 0xbb, 0xd6, 0xd9, + 0xb8, 0x6b, 0x3d, 0x1b, 0x77, 0xad, 0xe7, 0xe3, 0xae, 0xf5, 0xcf, 0xb8, 0x6b, 0xfd, 0xfa, 0xa2, + 0x5b, 0xf9, 0xc1, 0x59, 0xf2, 0xdf, 0xe5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x2f, 0x11, + 0xe8, 0xdd, 0x08, 0x00, 0x00, } func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 904e95207..f3ec4c06e 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -27,12 +27,12 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/certificates/v1beta1"; // Describes a certificate signing request message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -65,18 +65,18 @@ message CertificateSigningRequestCondition { // timestamp for the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. // If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated CertificateSigningRequest items = 2; } @@ -119,15 +119,15 @@ message CertificateSigningRequestSpec { // // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. // - // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. - // // +optional optional int32 expirationSeconds = 8; // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index 9961057c5..7e5a5c198 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -84,15 +84,15 @@ type CertificateSigningRequestSpec struct { // // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes. 
// - // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate. - // // +optional ExpirationSeconds *int32 `json:"expirationSeconds,omitempty" protobuf:"varint,8,opt,name=expirationSeconds"` // allowedUsages specifies a set of usage contexts the key will be // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // See: + // https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + // // Valid values are: // "signing", // "digital signature", @@ -231,8 +231,9 @@ type CertificateSigningRequestList struct { } // KeyUsages specifies valid usage contexts for keys. -// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// See: // +// https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 type KeyUsage string diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index 816a618cb..f9ab1f13d 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CertificateSigningRequest = map[string]string{ @@ -54,8 +54,8 @@ var map_CertificateSigningRequestSpec = map[string]string{ "": "CertificateSigningRequestSpec contains the certificate request.", "request": "Base64-encoded PKCS#10 CSR data", "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", - "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 
10 minutes.\n\nAs of v1.22, this field is beta and is controlled via the CSRDuration feature gate.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", + "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See:\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.3\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.12\n\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"", "username": "Information about the requesting user. See user.Info interface for details.", "uid": "UID information about the requesting user. See user.Info interface for details.", "groups": "Group information about the requesting user. See user.Info interface for details.", diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go index fc2f4f2c6..9b2fbbda3 100644 --- a/vendor/k8s.io/api/coordination/v1/doc.go +++ b/vendor/k8s.io/api/coordination/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=coordination.k8s.io diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go index d5ed0f27c..cf6702aef 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto +// source: k8s.io/api/coordination/v1/generated.proto package v1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Lease) Reset() { *m = Lease{} } func (*Lease) ProtoMessage() {} func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{0} + return fileDescriptor_239d5a4df3139dce, []int{0} } func (m *Lease) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_Lease proto.InternalMessageInfo func (m *LeaseList) Reset() { *m = LeaseList{} } func (*LeaseList) ProtoMessage() {} func (*LeaseList) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{1} + return fileDescriptor_239d5a4df3139dce, []int{1} } func (m *LeaseList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_LeaseList proto.InternalMessageInfo func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } func (*LeaseSpec) ProtoMessage() {} func (*LeaseSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{2} + return fileDescriptor_239d5a4df3139dce, []int{2} } func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,45 +135,48 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) -} - -var fileDescriptor_929e1148ad9baca3 = []byte{ - // 535 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, - 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, - 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, - 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, - 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, - 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, - 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, - 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, - 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, - 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, - 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, - 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, - 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, - 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, - 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, - 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, - 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, - 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, - 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, - 0x35, 
0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, - 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, - 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, - 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, - 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, - 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, - 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, - 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, - 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, - 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, - 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, - 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, - 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, - 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/coordination/v1/generated.proto", fileDescriptor_239d5a4df3139dce) +} + +var fileDescriptor_239d5a4df3139dce = []byte{ + // 588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x40, + 0x14, 0xc6, 0xb7, 0xb0, 0xab, 0xec, 0xac, 0xfc, 0xc9, 0xc8, 0x45, 0xb3, 0x17, 0x2d, 0x92, 0x98, + 0x10, 0x13, 0xa7, 0x42, 0x8c, 0x31, 0x26, 0x26, 0x58, 0x89, 0x4a, 0xb2, 0x44, 0x53, 0xb8, 0x32, + 0x5c, 0x38, 0xdb, 0x1e, 0xba, 0x23, 0xb4, 0x53, 0x67, 0x66, 0x31, 0xdc, 0xf9, 0x08, 0x3e, 0x81, + 0xef, 0xa0, 0x4f, 0xc1, 0x25, 0x97, 0x5c, 0x35, 0x32, 0xbe, 0x85, 0x57, 0x66, 0x66, 0x0b, 0x0b, + 0xcb, 0x6e, 0x20, 0xde, 0x75, 0xce, 0x39, 0xdf, 0xef, 0x7c, 0x73, 0x4e, 0x5b, 0xf4, 0x68, 0xff, + 0xb9, 0x24, 0x8c, 0x07, 0xb4, 0x60, 0x41, 0xcc, 0xb9, 0x48, 0x58, 0x4e, 0x15, 0xe3, 0x79, 0x70, + 0xb8, 0x1a, 0xa4, 0x90, 0x83, 0xa0, 0x0a, 0x12, 0x52, 0x08, 0xae, 0x38, 0x6e, 0x0f, 0x6a, 0x09, + 0x2d, 0x18, 0xb9, 0x5c, 0x4b, 0x0e, 0x57, 0xdb, 0x8f, 0x53, 0xa6, 0x7a, 0xfd, 0x2e, 0x89, 0x79, + 0x16, 0xa4, 0x3c, 0xe5, 0x81, 0x95, 0x74, 0xfb, 0x7b, 0xf6, 0x64, 0x0f, 0xf6, 0x69, 0x80, 0x6a, + 0x3f, 0x1d, 0xb6, 0xcd, 0x68, 0xdc, 0x63, 0x39, 0x88, 0xa3, 0xa0, 0xd8, 0x4f, 0x4d, 0x40, 0x06, + 0x19, 0x28, 0x3a, 0xc6, 0x40, 0x3b, 0x98, 0xa4, 0x12, 0xfd, 0x5c, 0xb1, 0x0c, 0xae, 0x09, 0x9e, + 0xdd, 0x24, 0x90, 0x71, 0x0f, 0x32, 0x3a, 0xaa, 0x5b, 0xfe, 0xe5, 0xa0, 0x46, 0x07, 0xa8, 0x04, + 0xfc, 0x09, 0xcd, 0x18, 0x37, 0x09, 0x55, 0xd4, 0x75, 0x96, 0x9c, 0x95, 0xd6, 0xda, 0x13, 0x32, + 0x1c, 0xc3, 0x05, 0x94, 0x14, 0xfb, 0xa9, 0x09, 0x48, 0x62, 0xaa, 0xc9, 0xe1, 0x2a, 0x79, 0xdf, + 0xfd, 0x0c, 0xb1, 0xda, 0x02, 0x45, 0x43, 0x7c, 0x5c, 0xfa, 0x35, 0x5d, 0xfa, 0x68, 0x18, 0x8b, + 0x2e, 0xa8, 0xf8, 0x2d, 0xaa, 0xcb, 0x02, 0x62, 0x77, 0xca, 0xd2, 0x1f, 0x92, 0xc9, 0x43, 0x26, + 0xd6, 0xd2, 0x76, 0x01, 0x71, 0x78, 0xaf, 0x42, 0xd6, 0xcd, 0x29, 0xb2, 0x80, 0xe5, 0x9f, 0x0e, + 0x6a, 0xda, 0x8a, 0x0e, 0x93, 0x0a, 0xef, 0x5e, 0x33, 0x4e, 0x6e, 0x67, 0xdc, 0xa8, 0xad, 0xed, + 0x85, 0xaa, 0xc7, 0xcc, 0x79, 0xe4, 0x92, 0xe9, 0x37, 0xa8, 0xc1, 0x14, 0x64, 0xd2, 0x9d, 0x5a, + 0x9a, 0x5e, 0x69, 0xad, 0x3d, 0xb8, 0xd1, 0x75, 0x38, 0x5b, 0xd1, 0x1a, 0x9b, 
0x46, 0x17, 0x0d, + 0xe4, 0xcb, 0x3f, 0xea, 0x95, 0x67, 0x73, 0x0f, 0xfc, 0x02, 0xcd, 0xf5, 0xf8, 0x41, 0x02, 0x62, + 0x33, 0x81, 0x5c, 0x31, 0x75, 0x64, 0x9d, 0x37, 0x43, 0xac, 0x4b, 0x7f, 0xee, 0xdd, 0x95, 0x4c, + 0x34, 0x52, 0x89, 0x3b, 0x68, 0xf1, 0xc0, 0x80, 0x36, 0xfa, 0xc2, 0x76, 0xde, 0x86, 0x98, 0xe7, + 0x89, 0xb4, 0x63, 0x6d, 0x84, 0xae, 0x2e, 0xfd, 0xc5, 0xce, 0x98, 0x7c, 0x34, 0x56, 0x85, 0xbb, + 0xa8, 0x45, 0xe3, 0x2f, 0x7d, 0x26, 0x60, 0x87, 0x65, 0xe0, 0x4e, 0xdb, 0x01, 0x06, 0xb7, 0x1b, + 0xe0, 0x16, 0x8b, 0x05, 0x37, 0xb2, 0x70, 0x5e, 0x97, 0x7e, 0xeb, 0xd5, 0x90, 0x13, 0x5d, 0x86, + 0xe2, 0x5d, 0xd4, 0x14, 0x90, 0xc3, 0x57, 0xdb, 0xa1, 0xfe, 0x7f, 0x1d, 0x66, 0x75, 0xe9, 0x37, + 0xa3, 0x73, 0x4a, 0x34, 0x04, 0xe2, 0x75, 0xb4, 0x60, 0x6f, 0xb6, 0x23, 0x68, 0x2e, 0x99, 0xb9, + 0x9b, 0x74, 0x1b, 0x76, 0x16, 0x8b, 0xba, 0xf4, 0x17, 0x3a, 0x23, 0xb9, 0xe8, 0x5a, 0x35, 0xde, + 0x40, 0x33, 0x52, 0x99, 0xaf, 0x22, 0x3d, 0x72, 0xef, 0xd8, 0x3d, 0xac, 0x98, 0xb7, 0x61, 0xbb, + 0x8a, 0xfd, 0x2d, 0x7d, 0xf7, 0xf5, 0xf9, 0xaa, 0x21, 0x19, 0x6c, 0xb1, 0xca, 0x45, 0x17, 0x4a, + 0xfc, 0x12, 0xcd, 0x17, 0x02, 0xf6, 0x40, 0x08, 0x48, 0x06, 0x2b, 0x74, 0xef, 0x5a, 0xd8, 0x7d, + 0x5d, 0xfa, 0xf3, 0x1f, 0xae, 0xa6, 0xa2, 0xd1, 0xda, 0x70, 0xfd, 0xf8, 0xcc, 0xab, 0x9d, 0x9c, + 0x79, 0xb5, 0xd3, 0x33, 0xaf, 0xf6, 0x4d, 0x7b, 0xce, 0xb1, 0xf6, 0x9c, 0x13, 0xed, 0x39, 0xa7, + 0xda, 0x73, 0x7e, 0x6b, 0xcf, 0xf9, 0xfe, 0xc7, 0xab, 0x7d, 0x6c, 0x4f, 0xfe, 0x8b, 0xfd, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0xf8, 0xf4, 0xd4, 0x78, 0xe2, 0x04, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -286,6 +289,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -395,6 +412,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -441,6 +466,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -860,6 +887,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto index 4d887850d..4d4f7e08f 100644 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -26,15 +26,15 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/coordination/v1"; // Lease defines a lease concept. message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -45,36 +45,52 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional optional string holderIdentity = 1; // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // to wait to force acquire it. This is measured against the time of last + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; // acquireTime is a time when the current lease was acquired. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. // +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go index 7a5605ace..5307cea88 100644 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -20,8 +20,21 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type CoordinatedLeaseStrategy string + +// CoordinatedLeaseStrategy defines the strategy for picking the leader for coordinated leader election. +const ( + // OldestEmulationVersion picks the oldest LeaseCandidate, where "oldest" is defined as follows + // 1) Select the candidate(s) with the lowest emulation version + // 2) If multiple candidates have the same emulation version, select the candidate(s) with the lowest binary version. (Note that binary version must be greater or equal to emulation version) + // 3) If multiple candidates have the same binary version, select the candidate with the oldest creationTimestamp. + // If a candidate does not specify the emulationVersion and binaryVersion fields, it will not be considered a candidate for the lease. + OldestEmulationVersion CoordinatedLeaseStrategy = "OldestEmulationVersion" +) + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // Lease defines a lease concept. type Lease struct { @@ -30,7 +43,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -39,11 +52,13 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last - // observed RenewTime. 
+ // to wait to force acquire it. This is measured against the time of last + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -57,9 +72,22 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. + // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LeaseList is a list of Lease objects. type LeaseList struct { @@ -69,6 +97,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index 0f1440430..6c1a7ea8b 100644 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election. If the field is not specified, there is no active coordination for this lease. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up. This field can only be set if Strategy is also set.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 99f6b0be7..4d549cc99 100644 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -111,6 +111,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..a22632cba --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. 
DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Lease) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/doc.go b/vendor/k8s.io/api/coordination/v1alpha1/doc.go new file mode 100644 index 000000000..33a0b0ea9 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=coordination.k8s.io + +package v1alpha1 // import "k8s.io/api/coordination/v1alpha1" diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go new file mode 100644 index 000000000..9e072e62d --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.pb.go @@ -0,0 +1,1036 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/coordination/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} } +func (*LeaseCandidate) ProtoMessage() {} +func (*LeaseCandidate) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{0} +} +func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidate) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidate.Merge(m, src) +} +func (m *LeaseCandidate) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidate) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidate.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo + +func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} } +func (*LeaseCandidateList) ProtoMessage() {} +func (*LeaseCandidateList) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{1} +} +func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateList.Merge(m, src) +} +func (m *LeaseCandidateList) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo + +func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} } +func (*LeaseCandidateSpec) ProtoMessage() {} +func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cb9e87df9da593c2, []int{2} +} +func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateSpec.Merge(m, src) +} +func (m *LeaseCandidateSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidate") + proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateList") + proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha1.LeaseCandidateSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/coordination/v1alpha1/generated.proto", fileDescriptor_cb9e87df9da593c2) +} + +var fileDescriptor_cb9e87df9da593c2 = []byte{ + // 570 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcb, 0x6e, 0xd3, 0x4c, + 0x14, 0xc7, 0xe3, 0x36, 0xf9, 0x94, 0xcc, 0xd7, 0xa0, 0x30, 0x15, 0x92, 0x95, 0x85, 0x13, 0x65, + 0x55, 0x21, 0x31, 0x6e, 0xa0, 0x42, 0x48, 0xec, 0x5c, 0x40, 0x42, 0x4a, 0x4b, 0xe5, 0x42, 0x25, + 0x50, 0x17, 
0x4c, 0xec, 0x53, 0x67, 0x48, 0x7c, 0xd1, 0x78, 0x52, 0x94, 0x5d, 0x17, 0x3c, 0x00, + 0x8f, 0x15, 0x58, 0x75, 0xd9, 0x55, 0x44, 0xcc, 0x5b, 0xb0, 0x42, 0x33, 0xb1, 0x73, 0x6d, 0x94, + 0x88, 0x5d, 0xce, 0xe5, 0xf7, 0x3f, 0xe7, 0x7f, 0xac, 0x0c, 0x3a, 0xec, 0xbe, 0x88, 0x09, 0x0b, + 0x4d, 0x1a, 0x31, 0xd3, 0x09, 0x43, 0xee, 0xb2, 0x80, 0x0a, 0x16, 0x06, 0xe6, 0x75, 0x93, 0xf6, + 0xa2, 0x0e, 0x6d, 0x9a, 0x1e, 0x04, 0xc0, 0xa9, 0x00, 0x97, 0x44, 0x3c, 0x14, 0x21, 0xae, 0x4f, + 0x08, 0x42, 0x23, 0x46, 0xe6, 0x09, 0x92, 0x11, 0xd5, 0x27, 0x1e, 0x13, 0x9d, 0x7e, 0x9b, 0x38, + 0xa1, 0x6f, 0x7a, 0xa1, 0x17, 0x9a, 0x0a, 0x6c, 0xf7, 0xaf, 0x54, 0xa4, 0x02, 0xf5, 0x6b, 0x22, + 0x58, 0x7d, 0xbc, 0x7e, 0x85, 0xe5, 0xe1, 0xd5, 0xa3, 0x59, 0xaf, 0x4f, 0x9d, 0x0e, 0x0b, 0x80, + 0x0f, 0xcc, 0xa8, 0xeb, 0xc9, 0x44, 0x6c, 0xfa, 0x20, 0xe8, 0x7d, 0x94, 0xb9, 0x8e, 0xe2, 0xfd, + 0x40, 0x30, 0x1f, 0x56, 0x80, 0xe7, 0x9b, 0x80, 0xd8, 0xe9, 0x80, 0x4f, 0x97, 0xb9, 0xc6, 0x4f, + 0x0d, 0x3d, 0x68, 0x01, 0x8d, 0xe1, 0x98, 0x06, 0x2e, 0x73, 0xa9, 0x00, 0xfc, 0x19, 0x15, 0xe5, + 0x5a, 0x2e, 0x15, 0x54, 0xd7, 0xea, 0xda, 0xc1, 0xff, 0x4f, 0x0f, 0xc9, 0xec, 0x82, 0x53, 0x75, + 0x12, 0x75, 0x3d, 0x99, 0x88, 0x89, 0xec, 0x26, 0xd7, 0x4d, 0xf2, 0xae, 0xfd, 0x05, 0x1c, 0x71, + 0x02, 0x82, 0x5a, 0x78, 0x38, 0xaa, 0xe5, 0x92, 0x51, 0x0d, 0xcd, 0x72, 0xf6, 0x54, 0x15, 0x5f, + 0xa0, 0x7c, 0x1c, 0x81, 0xa3, 0xef, 0x28, 0xf5, 0x23, 0xb2, 0xe9, 0xfb, 0x90, 0xc5, 0x0d, 0xcf, + 0x23, 0x70, 0xac, 0xbd, 0x74, 0x42, 0x5e, 0x46, 0xb6, 0xd2, 0x6b, 0xfc, 0xd0, 0x10, 0x5e, 0x6c, + 0x6d, 0xb1, 0x58, 0xe0, 0xcb, 0x15, 0x43, 0x64, 0x3b, 0x43, 0x92, 0x56, 0x76, 0x2a, 0xe9, 0xb0, + 0x62, 0x96, 0x99, 0x33, 0xf3, 0x01, 0x15, 0x98, 0x00, 0x3f, 0xd6, 0x77, 0xea, 0xbb, 0x4b, 0xb7, + 0xda, 0xca, 0x8d, 0x55, 0x4e, 0xc5, 0x0b, 0x6f, 0xa5, 0x8c, 0x3d, 0x51, 0x6b, 0x7c, 0xcb, 0x2f, + 0x7b, 0x91, 0x46, 0xb1, 0x89, 0x4a, 0x3d, 0x99, 0x3d, 0xa5, 0x3e, 0x28, 0x33, 0x25, 0xeb, 0x61, + 0xca, 0x97, 0x5a, 0x59, 0xc1, 0x9e, 0xf5, 0xe0, 0x8f, 0xa8, 0x18, 0xb1, 0xc0, 0x7b, 0xcf, 0x7c, + 0x48, 0xef, 0x6d, 0x6e, 0x67, 0xfe, 0x84, 0x39, 0x3c, 0x94, 0x98, 0xb5, 0x27, 0x9d, 0x9f, 0xa5, + 0x22, 0xf6, 0x54, 0x0e, 0x5f, 0xa2, 0x12, 0x87, 0x00, 0xbe, 0x2a, 0xed, 0xdd, 0x7f, 0xd3, 0x2e, + 0xcb, 0xc5, 0xed, 0x4c, 0xc5, 0x9e, 0x09, 0xe2, 0x97, 0xa8, 0xdc, 0x66, 0x01, 0xe5, 0x83, 0x0b, + 0xe0, 0x31, 0x0b, 0x03, 0x3d, 0xaf, 0xdc, 0x3e, 0x4a, 0xdd, 0x96, 0xad, 0xf9, 0xa2, 0xbd, 0xd8, + 0x8b, 0x5f, 0xa1, 0x0a, 0xf8, 0xfd, 0x9e, 0x3a, 0x7c, 0xc6, 0x17, 0x14, 0xaf, 0xa7, 0x7c, 0xe5, + 0xf5, 0x52, 0xdd, 0x5e, 0x21, 0xf0, 0x8d, 0x86, 0xf6, 0x23, 0x0e, 0x57, 0xc0, 0x39, 0xb8, 0xe7, + 0x42, 0xfe, 0x6f, 0x3c, 0x06, 0xb1, 0xfe, 0x5f, 0x7d, 0xf7, 0xa0, 0x64, 0x9d, 0x26, 0xa3, 0xda, + 0xfe, 0xd9, 0x6a, 0xf9, 0xcf, 0xa8, 0xf6, 0x6c, 0xfd, 0x03, 0x41, 0x8e, 0xb3, 0x18, 0x5c, 0xf5, + 0xc1, 0x52, 0x70, 0x60, 0xdf, 0x37, 0xca, 0x7a, 0x33, 0x1c, 0x1b, 0xb9, 0xdb, 0xb1, 0x91, 0xbb, + 0x1b, 0x1b, 0xb9, 0x9b, 0xc4, 0xd0, 0x86, 0x89, 0xa1, 0xdd, 0x26, 0x86, 0x76, 0x97, 0x18, 0xda, + 0xaf, 0xc4, 0xd0, 0xbe, 0xff, 0x36, 0x72, 0x9f, 0xea, 0x9b, 0xde, 0xc4, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x05, 0x28, 0x49, 0xd9, 0x36, 0x05, 0x00, 0x00, +} + +func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func 
(m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PreferredStrategies) > 0 { + for iNdEx := len(m.PreferredStrategies) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PreferredStrategies[iNdEx]) + copy(dAtA[i:], m.PreferredStrategies[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PreferredStrategies[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.EmulationVersion) + copy(dAtA[i:], m.EmulationVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.BinaryVersion) + copy(dAtA[i:], m.BinaryVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion))) + i-- + dAtA[i] = 0x22 + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.PingTime != nil { + { + size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.LeaseName) + copy(dAtA[i:], m.LeaseName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LeaseCandidate) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseCandidateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseCandidateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LeaseName) + n += 1 + l + sovGenerated(uint64(l)) + if m.PingTime != nil { + l = m.PingTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.BinaryVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EmulationVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PreferredStrategies) > 0 { + for _, s := range m.PreferredStrategies { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LeaseCandidate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LeaseCandidate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseCandidateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidateSpec{`, + `LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`, + `PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`, + `EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`, + `PreferredStrategies:` + fmt.Sprintf("%v", this.PreferredStrategies) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LeaseCandidate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LeaseCandidate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeaseName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PingTime == nil { + m.PingTime = &v1.MicroTime{} + } + if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinaryVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmulationVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredStrategies", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredStrategies = append(m.PreferredStrategies, k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/coordination/v1alpha1/generated.proto b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto new file mode 100644 index 000000000..57895ad56 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/generated.proto @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.coordination.v1alpha1; + +import "k8s.io/api/coordination/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/coordination/v1alpha1"; + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +message LeaseCandidate { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseCandidateSpec spec = 2; +} + +// LeaseCandidateList is a list of Lease objects. +message LeaseCandidateList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated LeaseCandidate items = 2; +} + +// LeaseCandidateSpec is a specification of a Lease. +message LeaseCandidateSpec { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. 
+ // +required + optional string leaseName = 1; + + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2; + + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3; + + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string binaryVersion = 4; + + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string emulationVersion = 5; + + // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. + // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated + // leader election to make a decision about the final election strategy. This follows as + // - If all clients have strategy X as the first element in this list, strategy X will be used. + // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y + // will be used. + // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader + // election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +listType=atomic + // +required + repeated string preferredStrategies = 6; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go b/vendor/k8s.io/api/coordination/v1alpha1/register.go similarity index 78% rename from vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go rename to vendor/k8s.io/api/coordination/v1alpha1/register.go index 2acd13dea..6e57905a1 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go +++ b/vendor/k8s.io/api/coordination/v1alpha1/register.go @@ -23,7 +23,7 @@ import ( ) // GroupName is the group name use in this package -const GroupName = "client.authentication.k8s.io" +const GroupName = "coordination.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} @@ -34,22 +34,20 @@ func Resource(resource string) schema.GroupResource { } var ( - SchemeBuilder runtime.SchemeBuilder + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. 
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - +// Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ExecCredential{}, + &LeaseCandidate{}, + &LeaseCandidateList{}, ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types.go b/vendor/k8s.io/api/coordination/v1alpha1/types.go new file mode 100644 index 000000000..14066600c --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/types.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +type LeaseCandidate struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseCandidateSpec is a specification of a Lease. +type LeaseCandidateSpec struct { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. + // +required + LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"` + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"` + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. 
The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"` + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + BinaryVersion string `json:"binaryVersion,omitempty" protobuf:"bytes,4,opt,name=binaryVersion"` + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"` + // PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. + // The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated + // leader election to make a decision about the final election strategy. This follows as + // - If all clients have strategy X as the first element in this list, strategy X will be used. + // - If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y + // will be used. + // - If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader + // election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +listType=atomic + // +required + PreferredStrategies []v1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty" protobuf:"bytes,6,opt,name=preferredStrategies"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// LeaseCandidateList is a list of Lease objects. +type LeaseCandidateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..0e52809c8 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_LeaseCandidate = map[string]string{ + "": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (LeaseCandidate) SwaggerDoc() map[string]string { + return map_LeaseCandidate +} + +var map_LeaseCandidateList = map[string]string{ + "": "LeaseCandidateList is a list of Lease objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (LeaseCandidateList) SwaggerDoc() map[string]string { + return map_LeaseCandidateList +} + +var map_LeaseCandidateSpec = map[string]string{ + "": "LeaseCandidateSpec is a specification of a Lease.", + "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", + "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", + "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", + "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required when strategy is \"OldestEmulationVersion\"", + "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", + "preferredStrategies": "PreferredStrategies indicates the list of strategies for picking the leader for coordinated leader election. The list is ordered, and the first strategy supersedes all other strategies. The list is used by coordinated leader election to make a decision about the final election strategy. This follows as - If all clients have strategy X as the first element in this list, strategy X will be used. 
- If a candidate has strategy [X] and another candidate has strategy [Y, X], Y supersedes X and strategy Y\n will be used.\n- If a candidate has strategy [X, Y] and another candidate has strategy [Y, X], this is a user error and leader\n election will not operate the Lease until resolved.\n(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", +} + +func (LeaseCandidateSpec) SwaggerDoc() map[string]string { + return map_LeaseCandidateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go similarity index 51% rename from vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go index ce614c0b8..9cf15d21d 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.deepcopy.go @@ -22,34 +22,31 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/coordination/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { +func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) { *out = *in out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(ExecCredentialStatus) - (*in).DeepCopyInto(*out) - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. -func (in *ExecCredential) DeepCopy() *ExecCredential { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate. +func (in *LeaseCandidate) DeepCopy() *LeaseCandidate { if in == nil { return nil } - out := new(ExecCredential) + out := new(LeaseCandidate) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExecCredential) DeepCopyObject() runtime.Object { +func (in *LeaseCandidate) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -57,73 +54,63 @@ func (in *ExecCredential) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { +func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) { *out = *in - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = new(Response) - (*in).DeepCopyInto(*out) + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LeaseCandidate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. -func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList. 
+func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList { if in == nil { return nil } - out := new(ExecCredentialSpec) + out := new(LeaseCandidateList) in.DeepCopyInto(out) return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { - *out = *in - if in.ExpirationTimestamp != nil { - in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. -func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { - if in == nil { - return nil +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c } - out := new(ExecCredentialStatus) - in.DeepCopyInto(out) - return out + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Response) DeepCopyInto(out *Response) { +func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) { *out = *in - if in.Header != nil { - in, out := &in.Header, &out.Header - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } + if in.PingTime != nil { + in, out := &in.PingTime, &out.PingTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + if in.PreferredStrategies != nil { + in, out := &in.PreferredStrategies, &out.PreferredStrategies + *out = make([]v1.CoordinatedLeaseStrategy, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. -func (in *Response) DeepCopy() *Response { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec. +func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec { if in == nil { return nil } - out := new(Response) + out := new(LeaseCandidateSpec) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..f42bef65c --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. 
+ +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index bcd00d445..bea9b8146 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto +// source: k8s.io/api/coordination/v1beta1/generated.proto package v1beta1 @@ -25,6 +25,8 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -47,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Lease) Reset() { *m = Lease{} } func (*Lease) ProtoMessage() {} func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{0} + return fileDescriptor_8d4e223b8bb23da3, []int{0} } func (m *Lease) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +77,7 @@ var xxx_messageInfo_Lease proto.InternalMessageInfo func (m *LeaseList) Reset() { *m = LeaseList{} } func (*LeaseList) ProtoMessage() {} func (*LeaseList) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{1} + return fileDescriptor_8d4e223b8bb23da3, []int{1} } func (m *LeaseList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +105,7 @@ var xxx_messageInfo_LeaseList proto.InternalMessageInfo func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } func (*LeaseSpec) ProtoMessage() {} func (*LeaseSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{2} + return fileDescriptor_8d4e223b8bb23da3, []int{2} } func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,45 +137,49 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) -} - -var fileDescriptor_daca6bcd2ff63a80 = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, - 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, - 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, - 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, - 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, - 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, - 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, - 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, - 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, - 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, - 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, - 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, - 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, - 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, - 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, - 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, - 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, 
- 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, - 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, - 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, - 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, - 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, - 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, - 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, - 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, - 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, - 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, - 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, - 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, - 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, - 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, - 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_8d4e223b8bb23da3) +} + +var fileDescriptor_8d4e223b8bb23da3 = []byte{ + // 600 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e, + 0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1, + 0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae, + 0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f, + 0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61, + 0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d, + 0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e, + 0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8, + 0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf, + 0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe, + 0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f, + 0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26, + 0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51, + 0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51, + 0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26, + 0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1, + 0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89, + 0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73, + 0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 
0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57, + 0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe, + 0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f, + 0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83, + 0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76, + 0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8, + 0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f, + 0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66, + 0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea, + 0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b, + 0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd, + 0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc, + 0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee, + 0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e, + 0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05, + 0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee, + 0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea, + 0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed, + 0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -286,6 +292,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -395,6 +415,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -441,6 +469,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -860,6 +890,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 10b485e30..088811a74 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -21,20 +21,21 @@ syntax = "proto2"; package k8s.io.api.coordination.v1beta1; +import "k8s.io/api/coordination/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/coordination/v1beta1"; // Lease defines a lease concept. message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; @@ -45,36 +46,50 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated Lease items = 2; } // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. 
// +optional optional string holderIdentity = 1; // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; // acquireTime is a time when the current lease was acquired. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. // +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index 8f300fca8..d63fc30a9 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +34,7 @@ type Lease struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the Lease. + // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -42,11 +43,13 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need // to wait to force acquire it. This is measure against time of last - // observed RenewTime. + // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` // acquireTime is a time when the current lease was acquired. @@ -60,6 +63,16 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. 
+ // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -75,6 +88,6 @@ type LeaseList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index f557d265d..50fe8ea18 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (LeaseList) SwaggerDoc() map[string]string { @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index 3adfd8720..dcef1e346 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -111,6 +112,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(v1.CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index eb9517e1d..5cf6f329f 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file should be consistent with pkg/api/annotation_key_constants.go. +// This file should be consistent with pkg/apis/core/annotation_key_constants.go. package v1 @@ -54,21 +54,18 @@ const ( // SeccompLocalhostProfileNamePrefix is the prefix for specifying profiles loaded from the node's disk. SeccompLocalhostProfileNamePrefix = "localhost/" - // AppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile. - AppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // AppArmorBetaDefaultProfileAnnotatoinKey is the annotation key specifying the default AppArmor profile. - AppArmorBetaDefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName" - // AppArmorBetaAllowedProfileAnnotationKey is the annotation key specifying the allowed AppArmor profiles. - AppArmorBetaAllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames" + // DeprecatedAppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile. + // Deprecated: use a pod or container security context `appArmorProfile` field instead. + DeprecatedAppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/" - // AppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default. - AppArmorBetaProfileRuntimeDefault = "runtime/default" + // DeprecatedAppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default. 
+ DeprecatedAppArmorBetaProfileRuntimeDefault = "runtime/default" - // AppArmorBetaProfileNamePrefix is the prefix for specifying profiles loaded on the node. - AppArmorBetaProfileNamePrefix = "localhost/" + // DeprecatedAppArmorBetaProfileNamePrefix is the prefix for specifying profiles loaded on the node. + DeprecatedAppArmorBetaProfileNamePrefix = "localhost/" - // AppArmorBetaProfileNameUnconfined is the Unconfined AppArmor profile - AppArmorBetaProfileNameUnconfined = "unconfined" + // DeprecatedAppArmorBetaProfileNameUnconfined is the Unconfined AppArmor profile + DeprecatedAppArmorBetaProfileNameUnconfined = "unconfined" // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead. @@ -78,7 +75,7 @@ const ( // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache // an object (e.g. secret, config map) before fetching it again from apiserver. // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" @@ -144,8 +141,19 @@ const ( // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" - // AnnotationTopologyAwareHints can be used to enable or disable Topology - // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any - // other value is treated as "Disabled". - AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + // DeprecatedAnnotationTopologyAwareHints can be used to enable or disable + // Topology Aware Hints for a Service. This may be set to "Auto" or + // "Disabled". Any other value is treated as "Disabled". This annotation has + // been deprecated in favor of the "service.kubernetes.io/topology-mode" + // annotation. + DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" + + // AnnotationTopologyMode can be used to enable or disable Topology Aware + // Routing for a Service. Well known values are "Auto" and "Disabled". + // Implementations may choose to develop new topology approaches, exposing + // them with domain-prefixed values. For example, "example.com/lowest-rtt" + // could be a valid implementation-specific value for this annotation. These + // heuristics will often populate topology hints on EndpointSlices, but that + // is not a requirement. + AnnotationTopologyMode = "service.kubernetes.io/topology-mode" ) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 1bdf0b25b..bc0041b33 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -17,6 +17,8 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package +// +k8s:prerelease-lifecycle-gen=true +// +groupName= // Package v1 is the v1 version of the core API. package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 0418699e6..5654ee482 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto +// source: k8s.io/api/core/v1/generated.proto package v1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{0} + return fileDescriptor_6c07b07c062484ab, []int{0} } func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo func (m *Affinity) Reset() { *m = Affinity{} } func (*Affinity) ProtoMessage() {} func (*Affinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{1} + return fileDescriptor_6c07b07c062484ab, []int{1} } func (m *Affinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,10 +105,38 @@ func (m *Affinity) XXX_DiscardUnknown() { var xxx_messageInfo_Affinity proto.InternalMessageInfo +func (m *AppArmorProfile) Reset() { *m = AppArmorProfile{} } +func (*AppArmorProfile) ProtoMessage() {} +func (*AppArmorProfile) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{2} +} +func (m *AppArmorProfile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppArmorProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppArmorProfile) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppArmorProfile.Merge(m, src) +} +func (m *AppArmorProfile) XXX_Size() int { + return m.Size() +} +func (m *AppArmorProfile) XXX_DiscardUnknown() { + xxx_messageInfo_AppArmorProfile.DiscardUnknown(m) +} + +var xxx_messageInfo_AppArmorProfile proto.InternalMessageInfo + func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } func (*AttachedVolume) ProtoMessage() {} func (*AttachedVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{2} + return fileDescriptor_6c07b07c062484ab, []int{3} } func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +164,7 @@ var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo func (m *AvoidPods) Reset() { *m = AvoidPods{} } func (*AvoidPods) ProtoMessage() {} func (*AvoidPods) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{3} + return fileDescriptor_6c07b07c062484ab, []int{4} } func (m *AvoidPods) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +192,7 @@ var xxx_messageInfo_AvoidPods proto.InternalMessageInfo func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } func (*AzureDiskVolumeSource) ProtoMessage() {} func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{4} + return fileDescriptor_6c07b07c062484ab, []int{5} } func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +220,7 @@ var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } func (*AzureFilePersistentVolumeSource) ProtoMessage() {} func (*AzureFilePersistentVolumeSource) Descriptor() 
([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{5} + return fileDescriptor_6c07b07c062484ab, []int{6} } func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +248,7 @@ var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } func (*AzureFileVolumeSource) ProtoMessage() {} func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{6} + return fileDescriptor_6c07b07c062484ab, []int{7} } func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +276,7 @@ var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo func (m *Binding) Reset() { *m = Binding{} } func (*Binding) ProtoMessage() {} func (*Binding) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{7} + return fileDescriptor_6c07b07c062484ab, []int{8} } func (m *Binding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +304,7 @@ var xxx_messageInfo_Binding proto.InternalMessageInfo func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } func (*CSIPersistentVolumeSource) ProtoMessage() {} func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{8} + return fileDescriptor_6c07b07c062484ab, []int{9} } func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +332,7 @@ var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } func (*CSIVolumeSource) ProtoMessage() {} func (*CSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{9} + return fileDescriptor_6c07b07c062484ab, []int{10} } func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +360,7 @@ var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} func (*Capabilities) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{10} + return fileDescriptor_6c07b07c062484ab, []int{11} } func (m *Capabilities) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_Capabilities proto.InternalMessageInfo func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{11} + return fileDescriptor_6c07b07c062484ab, []int{12} } func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{12} + return fileDescriptor_6c07b07c062484ab, []int{13} } func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } func (*CinderPersistentVolumeSource) 
ProtoMessage() {} func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{13} + return fileDescriptor_6c07b07c062484ab, []int{14} } func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} func (*CinderVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{14} + return fileDescriptor_6c07b07c062484ab, []int{15} } func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +500,7 @@ var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} func (*ClientIPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{15} + return fileDescriptor_6c07b07c062484ab, []int{16} } func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,10 +525,38 @@ func (m *ClientIPConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo +func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} } +func (*ClusterTrustBundleProjection) ProtoMessage() {} +func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{17} +} +func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterTrustBundleProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterTrustBundleProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterTrustBundleProjection.Merge(m, src) +} +func (m *ClusterTrustBundleProjection) XXX_Size() int { + return m.Size() +} +func (m *ClusterTrustBundleProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterTrustBundleProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo + func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{16} + return fileDescriptor_6c07b07c062484ab, []int{18} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +584,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{17} + return fileDescriptor_6c07b07c062484ab, []int{19} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +612,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{18} + return fileDescriptor_6c07b07c062484ab, []int{20} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +640,7 
@@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{19} + return fileDescriptor_6c07b07c062484ab, []int{21} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +668,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{20} + return fileDescriptor_6c07b07c062484ab, []int{22} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +696,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{21} + return fileDescriptor_6c07b07c062484ab, []int{23} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +724,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{22} + return fileDescriptor_6c07b07c062484ab, []int{24} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +752,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{23} + return fileDescriptor_6c07b07c062484ab, []int{25} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +780,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{24} + return fileDescriptor_6c07b07c062484ab, []int{26} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +808,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{25} + return fileDescriptor_6c07b07c062484ab, []int{27} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +836,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{26} + return fileDescriptor_6c07b07c062484ab, []int{28} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +864,7 @@ var xxx_messageInfo_Container 
proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{27} + return fileDescriptor_6c07b07c062484ab, []int{29} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +892,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{28} + return fileDescriptor_6c07b07c062484ab, []int{30} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -861,10 +917,38 @@ func (m *ContainerPort) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerPort proto.InternalMessageInfo +func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } +func (*ContainerResizePolicy) ProtoMessage() {} +func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{31} +} +func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResizePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerResizePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResizePolicy.Merge(m, src) +} +func (m *ContainerResizePolicy) XXX_Size() int { + return m.Size() +} +func (m *ContainerResizePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResizePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo + func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{29} + return fileDescriptor_6c07b07c062484ab, []int{32} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +976,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_6c07b07c062484ab, []int{33} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +1004,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_6c07b07c062484ab, []int{34} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +1032,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_6c07b07c062484ab, []int{35} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -976,7 +1060,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return fileDescriptor_6c07b07c062484ab, []int{36} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1001,10 +1085,38 @@ func (m *ContainerStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo +func (m *ContainerUser) Reset() { *m = ContainerUser{} } +func (*ContainerUser) ProtoMessage() {} +func (*ContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{37} +} +func (m *ContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerUser.Merge(m, src) +} +func (m *ContainerUser) XXX_Size() int { + return m.Size() +} +func (m *ContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerUser proto.InternalMessageInfo + func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_6c07b07c062484ab, []int{38} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1144,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_6c07b07c062484ab, []int{39} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_6c07b07c062484ab, []int{40} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1200,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_6c07b07c062484ab, []int{41} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1228,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_6c07b07c062484ab, []int{42} } func (m 
*EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1256,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_6c07b07c062484ab, []int{43} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1284,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_6c07b07c062484ab, []int{44} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1312,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{41} + return fileDescriptor_6c07b07c062484ab, []int{45} } func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1340,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_6c07b07c062484ab, []int{46} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1368,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_6c07b07c062484ab, []int{47} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1396,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_6c07b07c062484ab, []int{48} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1424,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_6c07b07c062484ab, []int{49} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1452,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_6c07b07c062484ab, []int{50} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1480,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func 
(*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_6c07b07c062484ab, []int{51} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1508,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_6c07b07c062484ab, []int{52} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1536,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_6c07b07c062484ab, []int{53} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1564,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_6c07b07c062484ab, []int{54} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1592,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_6c07b07c062484ab, []int{55} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1620,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_6c07b07c062484ab, []int{56} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1648,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_6c07b07c062484ab, []int{57} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1676,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_6c07b07c062484ab, []int{58} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1704,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_6c07b07c062484ab, []int{59} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -1620,7 +1732,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_6c07b07c062484ab, []int{60} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1760,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_6c07b07c062484ab, []int{61} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1788,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_6c07b07c062484ab, []int{62} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1816,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_6c07b07c062484ab, []int{63} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1844,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_6c07b07c062484ab, []int{64} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1872,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return fileDescriptor_6c07b07c062484ab, []int{65} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1900,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_6c07b07c062484ab, []int{66} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1928,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_6c07b07c062484ab, []int{67} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1956,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_6c07b07c062484ab, []int{68} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1984,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_6c07b07c062484ab, []int{69} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +2012,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_6c07b07c062484ab, []int{70} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1925,10 +2037,38 @@ func (m *HostAlias) XXX_DiscardUnknown() { var xxx_messageInfo_HostAlias proto.InternalMessageInfo +func (m *HostIP) Reset() { *m = HostIP{} } +func (*HostIP) ProtoMessage() {} +func (*HostIP) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{71} +} +func (m *HostIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostIP.Merge(m, src) +} +func (m *HostIP) XXX_Size() int { + return m.Size() +} +func (m *HostIP) XXX_DiscardUnknown() { + xxx_messageInfo_HostIP.DiscardUnknown(m) +} + +var xxx_messageInfo_HostIP proto.InternalMessageInfo + func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_6c07b07c062484ab, []int{72} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +2096,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_6c07b07c062484ab, []int{73} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +2124,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_6c07b07c062484ab, []int{74} } func (m *ISCSIVolumeSource) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2009,10 +2149,38 @@ func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo +func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} } +func (*ImageVolumeSource) ProtoMessage() {} +func (*ImageVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{75} +} +func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageVolumeSource.Merge(m, src) +} +func (m *ImageVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ImageVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ImageVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo + func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_6c07b07c062484ab, []int{76} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2208,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_6c07b07c062484ab, []int{77} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2236,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_6c07b07c062484ab, []int{78} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2264,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_6c07b07c062484ab, []int{79} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2292,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return fileDescriptor_6c07b07c062484ab, []int{80} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2320,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_6c07b07c062484ab, []int{81} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2348,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) 
Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_6c07b07c062484ab, []int{82} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2205,10 +2373,38 @@ func (m *LimitRangeSpec) XXX_DiscardUnknown() { var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo +func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} } +func (*LinuxContainerUser) ProtoMessage() {} +func (*LinuxContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{83} +} +func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LinuxContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxContainerUser.Merge(m, src) +} +func (m *LinuxContainerUser) XXX_Size() int { + return m.Size() +} +func (m *LinuxContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo + func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_6c07b07c062484ab, []int{84} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2432,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_6c07b07c062484ab, []int{85} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2460,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_6c07b07c062484ab, []int{86} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2488,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_6c07b07c062484ab, []int{87} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2516,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_6c07b07c062484ab, []int{88} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2345,10 +2541,38 @@ func (m *LocalVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo +func 
(m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} } +func (*ModifyVolumeStatus) ProtoMessage() {} +func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{89} +} +func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ModifyVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ModifyVolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModifyVolumeStatus.Merge(m, src) +} +func (m *ModifyVolumeStatus) XXX_Size() int { + return m.Size() +} +func (m *ModifyVolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ModifyVolumeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo + func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_6c07b07c062484ab, []int{90} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2600,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_6c07b07c062484ab, []int{91} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2628,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_6c07b07c062484ab, []int{92} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2656,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_6c07b07c062484ab, []int{93} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2684,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} + return fileDescriptor_6c07b07c062484ab, []int{94} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2712,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} + return fileDescriptor_6c07b07c062484ab, []int{95} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2740,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{88} + return fileDescriptor_6c07b07c062484ab, []int{96} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2768,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} + return fileDescriptor_6c07b07c062484ab, []int{97} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2796,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} + return fileDescriptor_6c07b07c062484ab, []int{98} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2824,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{91} + return fileDescriptor_6c07b07c062484ab, []int{99} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2852,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} + return fileDescriptor_6c07b07c062484ab, []int{100} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2880,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_6c07b07c062484ab, []int{101} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2908,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_6c07b07c062484ab, []int{102} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2709,10 +2933,38 @@ func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo +func (m *NodeFeatures) Reset() { *m = NodeFeatures{} } +func (*NodeFeatures) ProtoMessage() {} +func (*NodeFeatures) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{103} +} +func (m *NodeFeatures) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeFeatures) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeFeatures.Merge(m, src) +} +func (m *NodeFeatures) XXX_Size() int { + return m.Size() +} +func (m *NodeFeatures) XXX_DiscardUnknown() { + 
xxx_messageInfo_NodeFeatures.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo + func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_6c07b07c062484ab, []int{104} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2992,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_6c07b07c062484ab, []int{105} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2765,15 +3017,43 @@ func (m *NodeProxyOptions) XXX_DiscardUnknown() { var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo -func (m *NodeResources) Reset() { *m = NodeResources{} } -func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} +func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} } +func (*NodeRuntimeHandler) ProtoMessage() {} +func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{106} +} +func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeRuntimeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeRuntimeHandler) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeRuntimeHandler.Merge(m, src) +} +func (m *NodeRuntimeHandler) XXX_Size() int { + return m.Size() +} +func (m *NodeRuntimeHandler) XXX_DiscardUnknown() { + xxx_messageInfo_NodeRuntimeHandler.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo + +func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} } +func (*NodeRuntimeHandlerFeatures) ProtoMessage() {} +func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{107} } -func (m *NodeResources) XXX_Unmarshal(b []byte) error { +func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *NodeRuntimeHandlerFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -2781,22 +3061,22 @@ func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *NodeResources) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeResources.Merge(m, src) +func (m *NodeRuntimeHandlerFeatures) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeRuntimeHandlerFeatures.Merge(m, src) } -func (m *NodeResources) XXX_Size() int { +func (m *NodeRuntimeHandlerFeatures) XXX_Size() int { return m.Size() } -func (m *NodeResources) XXX_DiscardUnknown() { - xxx_messageInfo_NodeResources.DiscardUnknown(m) +func (m *NodeRuntimeHandlerFeatures) XXX_DiscardUnknown() { + xxx_messageInfo_NodeRuntimeHandlerFeatures.DiscardUnknown(m) } -var xxx_messageInfo_NodeResources proto.InternalMessageInfo +var xxx_messageInfo_NodeRuntimeHandlerFeatures 
proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_6c07b07c062484ab, []int{108} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +3104,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_6c07b07c062484ab, []int{109} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +3132,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_6c07b07c062484ab, []int{110} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +3160,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_6c07b07c062484ab, []int{111} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +3188,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_6c07b07c062484ab, []int{112} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +3216,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_6c07b07c062484ab, []int{113} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3244,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_6c07b07c062484ab, []int{114} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3272,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_6c07b07c062484ab, []int{115} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3300,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} 
func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_6c07b07c062484ab, []int{116} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3328,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_6c07b07c062484ab, []int{117} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_6c07b07c062484ab, []int{118} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_6c07b07c062484ab, []int{119} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_6c07b07c062484ab, []int{120} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_6c07b07c062484ab, []int{121} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_6c07b07c062484ab, []int{122} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_6c07b07c062484ab, []int{123} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_6c07b07c062484ab, []int{124} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_6c07b07c062484ab, []int{125} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_6c07b07c062484ab, []int{126} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return fileDescriptor_6c07b07c062484ab, []int{127} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_6c07b07c062484ab, []int{128} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3664,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_6c07b07c062484ab, []int{129} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3692,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_6c07b07c062484ab, []int{130} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3720,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) 
ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_6c07b07c062484ab, []int{131} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3748,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_6c07b07c062484ab, []int{132} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3776,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_6c07b07c062484ab, []int{133} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3804,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_6c07b07c062484ab, []int{134} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3832,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_6c07b07c062484ab, []int{135} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3860,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_6c07b07c062484ab, []int{136} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3888,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_6c07b07c062484ab, []int{137} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3916,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_6c07b07c062484ab, []int{138} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3944,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_6c07b07c062484ab, []int{139} } func (m *PodList) XXX_Unmarshal(b []byte) 
error { return m.Unmarshal(b) @@ -3692,7 +3972,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_6c07b07c062484ab, []int{140} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +4000,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_6c07b07c062484ab, []int{141} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +4028,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_6c07b07c062484ab, []int{142} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +4056,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + return fileDescriptor_6c07b07c062484ab, []int{143} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +4084,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_6c07b07c062484ab, []int{144} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3829,10 +4109,94 @@ func (m *PodReadinessGate) XXX_DiscardUnknown() { var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo +func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } +func (*PodResourceClaim) ProtoMessage() {} +func (*PodResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{145} +} +func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodResourceClaim.Merge(m, src) +} +func (m *PodResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *PodResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_PodResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo + +func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } +func (*PodResourceClaimStatus) ProtoMessage() {} +func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{146} +} +func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodResourceClaimStatus) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodResourceClaimStatus.Merge(m, src) +} +func (m *PodResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *PodResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo + +func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } +func (*PodSchedulingGate) ProtoMessage() {} +func (*PodSchedulingGate) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{147} +} +func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingGate.Merge(m, src) +} +func (m *PodSchedulingGate) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingGate) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingGate.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo + func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_6c07b07c062484ab, []int{148} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +4224,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_6c07b07c062484ab, []int{149} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +4252,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_6c07b07c062484ab, []int{150} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +4280,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_6c07b07c062484ab, []int{151} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4308,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_6c07b07c062484ab, []int{152} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4336,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) 
ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_6c07b07c062484ab, []int{153} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4364,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_6c07b07c062484ab, []int{154} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4392,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_6c07b07c062484ab, []int{155} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4420,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_6c07b07c062484ab, []int{156} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4448,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_6c07b07c062484ab, []int{157} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4476,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_6c07b07c062484ab, []int{158} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4504,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_6c07b07c062484ab, []int{159} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4532,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_6c07b07c062484ab, []int{160} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4560,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_6c07b07c062484ab, []int{161} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4588,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_6c07b07c062484ab, []int{162} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4616,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_6c07b07c062484ab, []int{163} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4644,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_6c07b07c062484ab, []int{164} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4672,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_6c07b07c062484ab, []int{165} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4700,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_6c07b07c062484ab, []int{166} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4728,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_6c07b07c062484ab, []int{167} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4756,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_6c07b07c062484ab, []int{168} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4784,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func 
(*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_6c07b07c062484ab, []int{169} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4812,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_6c07b07c062484ab, []int{170} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4840,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return fileDescriptor_6c07b07c062484ab, []int{171} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4868,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_6c07b07c062484ab, []int{172} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4529,10 +4893,38 @@ func (m *ReplicationControllerStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{173} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_6c07b07c062484ab, []int{174} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4557,10 +4949,38 @@ func (m *ResourceFieldSelector) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo +func (m *ResourceHealth) Reset() { *m = ResourceHealth{} } +func (*ResourceHealth) ProtoMessage() {} +func (*ResourceHealth) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{175} +} +func (m *ResourceHealth) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ResourceHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHealth) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHealth.Merge(m, src) +} +func (m *ResourceHealth) XXX_Size() int { + return m.Size() +} +func (m *ResourceHealth) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHealth.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo + func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_6c07b07c062484ab, []int{176} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +5008,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_6c07b07c062484ab, []int{177} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +5036,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_6c07b07c062484ab, []int{178} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +5064,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_6c07b07c062484ab, []int{179} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +5092,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_6c07b07c062484ab, []int{180} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4697,10 +5117,38 @@ func (m *ResourceRequirements) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo +func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } +func (*ResourceStatus) ProtoMessage() {} +func (*ResourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{181} +} +func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceStatus.Merge(m, src) +} +func (m *ResourceStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceStatus) XXX_DiscardUnknown() 
{ + xxx_messageInfo_ResourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo + func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_6c07b07c062484ab, []int{182} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +5176,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_6c07b07c062484ab, []int{183} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +5204,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_6c07b07c062484ab, []int{184} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +5232,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_6c07b07c062484ab, []int{185} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +5260,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_6c07b07c062484ab, []int{186} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +5288,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_6c07b07c062484ab, []int{187} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +5316,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_6c07b07c062484ab, []int{188} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +5344,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_6c07b07c062484ab, []int{189} } func (m 
*SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +5372,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_6c07b07c062484ab, []int{190} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5400,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_6c07b07c062484ab, []int{191} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5428,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_6c07b07c062484ab, []int{192} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +5456,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_6c07b07c062484ab, []int{193} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5484,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_6c07b07c062484ab, []int{194} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5512,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_6c07b07c062484ab, []int{195} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5540,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_6c07b07c062484ab, []int{196} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5568,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_6c07b07c062484ab, []int{197} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5596,7 @@ var xxx_messageInfo_Service 
proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_6c07b07c062484ab, []int{198} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5624,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_6c07b07c062484ab, []int{199} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5652,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_6c07b07c062484ab, []int{200} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5680,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_6c07b07c062484ab, []int{201} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5708,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_6c07b07c062484ab, []int{202} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5736,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_6c07b07c062484ab, []int{203} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5764,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_6c07b07c062484ab, []int{204} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5792,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_6c07b07c062484ab, []int{205} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5820,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = 
SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_6c07b07c062484ab, []int{206} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5397,10 +5845,38 @@ func (m *SessionAffinityConfig) XXX_DiscardUnknown() { var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo +func (m *SleepAction) Reset() { *m = SleepAction{} } +func (*SleepAction) ProtoMessage() {} +func (*SleepAction) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{207} +} +func (m *SleepAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SleepAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SleepAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_SleepAction.Merge(m, src) +} +func (m *SleepAction) XXX_Size() int { + return m.Size() +} +func (m *SleepAction) XXX_DiscardUnknown() { + xxx_messageInfo_SleepAction.DiscardUnknown(m) +} + +var xxx_messageInfo_SleepAction proto.InternalMessageInfo + func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_6c07b07c062484ab, []int{208} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5904,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_6c07b07c062484ab, []int{209} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5932,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_6c07b07c062484ab, []int{210} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5960,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_6c07b07c062484ab, []int{211} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5988,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_6c07b07c062484ab, []int{212} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +6016,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func 
(*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_6c07b07c062484ab, []int{213} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +6044,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_6c07b07c062484ab, []int{214} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +6072,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_6c07b07c062484ab, []int{215} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +6100,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_6c07b07c062484ab, []int{216} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +6128,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} + return fileDescriptor_6c07b07c062484ab, []int{217} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5677,10 +6153,38 @@ func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo +func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } +func (*TypedObjectReference) ProtoMessage() {} +func (*TypedObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{218} +} +func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypedObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypedObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedObjectReference.Merge(m, src) +} +func (m *TypedObjectReference) XXX_Size() int { + return m.Size() +} +func (m *TypedObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_TypedObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo + func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_6c07b07c062484ab, []int{219} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-5708,7 +6212,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_6c07b07c062484ab, []int{220} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +6240,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_6c07b07c062484ab, []int{221} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5761,10 +6265,38 @@ func (m *VolumeMount) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeMount proto.InternalMessageInfo +func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} } +func (*VolumeMountStatus) ProtoMessage() {} +func (*VolumeMountStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{222} +} +func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMountStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeMountStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMountStatus.Merge(m, src) +} +func (m *VolumeMountStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeMountStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMountStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo + func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_6c07b07c062484ab, []int{223} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +6324,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_6c07b07c062484ab, []int{224} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5817,10 +6349,38 @@ func (m *VolumeProjection) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo +func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } +func (*VolumeResourceRequirements) ProtoMessage() {} +func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{225} +} +func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeResourceRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeResourceRequirements.Merge(m, src) +} +func (m *VolumeResourceRequirements) XXX_Size() int { + return m.Size() +} +func (m 
*VolumeResourceRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeResourceRequirements.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo + func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_6c07b07c062484ab, []int{226} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +6408,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_6c07b07c062484ab, []int{227} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +6436,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_6c07b07c062484ab, []int{228} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +6464,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_6c07b07c062484ab, []int{229} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,6 +6492,7 @@ var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo func init() { proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AppArmorProfile)(nil), "k8s.io.api.core.v1.AppArmorProfile") proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") @@ -5948,6 +6509,7 @@ func init() { proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection") proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") @@ -5963,11 +6525,14 @@ func init() { proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") 
proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy") proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry") + proto.RegisterType((*ContainerUser)(nil), "k8s.io.api.core.v1.ContainerUser") proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") @@ -6003,9 +6568,11 @@ func init() { proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostIP)(nil), "k8s.io.api.core.v1.HostIP") proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*ImageVolumeSource)(nil), "k8s.io.api.core.v1.ImageVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler") @@ -6018,11 +6585,13 @@ func init() { proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*LinuxContainerUser)(nil), "k8s.io.api.core.v1.LinuxContainerUser") proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*ModifyVolumeStatus)(nil), "k8s.io.api.core.v1.ModifyVolumeStatus") proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") @@ -6036,10 +6605,11 @@ func init() { proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeFeatures)(nil), "k8s.io.api.core.v1.NodeFeatures") proto.RegisterType((*NodeList)(nil), 
"k8s.io.api.core.v1.NodeList") proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") - proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") - proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeResources.CapacityEntry") + proto.RegisterType((*NodeRuntimeHandler)(nil), "k8s.io.api.core.v1.NodeRuntimeHandler") + proto.RegisterType((*NodeRuntimeHandlerFeatures)(nil), "k8s.io.api.core.v1.NodeRuntimeHandlerFeatures") proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") @@ -6056,6 +6626,7 @@ func init() { proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((map[ResourceName]ClaimResourceStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourceStatusesEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourcesEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimTemplate") @@ -6082,6 +6653,9 @@ func init() { proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodResourceClaim)(nil), "k8s.io.api.core.v1.PodResourceClaim") + proto.RegisterType((*PodResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodResourceClaimStatus") + proto.RegisterType((*PodSchedulingGate)(nil), "k8s.io.api.core.v1.PodSchedulingGate") proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") @@ -6110,7 +6684,9 @@ func init() { proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry") proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim") proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceHealth)(nil), "k8s.io.api.core.v1.ResourceHealth") proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") @@ -6121,6 +6697,7 @@ func init() { proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") proto.RegisterMapType((ResourceList)(nil), 
"k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*ResourceStatus)(nil), "k8s.io.api.core.v1.ResourceStatus") proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") @@ -6149,6 +6726,7 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*SleepAction)(nil), "k8s.io.api.core.v1.SleepAction") proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") @@ -6159,11 +6737,16 @@ func init() { proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint") proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") + proto.RegisterType((*TypedObjectReference)(nil), "k8s.io.api.core.v1.TypedObjectReference") proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeMountStatus)(nil), "k8s.io.api.core.v1.VolumeMountStatus") proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeResourceRequirements)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.LimitsEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.RequestsEntry") proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") @@ -6171,901 +6754,1015 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) -} - -var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 14240 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x79, 0x70, 0x24, 0xd7, - 0x79, 0x18, 0xae, 0x9e, 0xc1, 0x35, 0x1f, 0xee, 0xb7, 0x07, 0xb1, 0x20, 0x77, 0xb1, 0x6c, 0x4a, - 0xcb, 0xa5, 0x48, 0x62, 0xb5, 0x3c, 0x24, 0x9a, 0x94, 0x68, 0x01, 0x18, 0x60, 0x17, 0xdc, 0x05, - 0x76, 0xf8, 0x06, 0xbb, 0x2b, 0xc9, 0x94, 0x4a, 0x8d, 0x99, 0x07, 0xa0, 0x85, 0x99, 0xee, 0x61, - 0x77, 0x0f, 0x76, 0xc1, 0x9f, 0x5c, 0x3f, 0x47, 0x3e, 0xe5, 0x23, 0xa5, 0x4a, 0x39, 0x47, 0xc9, - 0x2e, 0x57, 0xca, 0x71, 0x62, 0x2b, 0xca, 0xe5, 0xc8, 0xb1, 0x1d, 0xcb, 0x89, 0x9d, 0xdb, 0xc9, - 0x1f, 0xb6, 0xe3, 0x4a, 0x2c, 0x57, 0xb9, 0x82, 0xd8, 0xeb, 0x54, 0xb9, 0x54, 0x95, 0xd8, 0x4e, - 0x9c, 0xfc, 
0x91, 0x8d, 0x2b, 0x4e, 0xbd, 0xb3, 0xdf, 0xeb, 0x6b, 0x06, 0x4b, 0x2c, 0x44, 0xa9, - 0xf8, 0xdf, 0xcc, 0xfb, 0xbe, 0xf7, 0xbd, 0xd7, 0xef, 0xfc, 0xde, 0x77, 0xc2, 0x2b, 0xbb, 0x2f, - 0x85, 0xf3, 0xae, 0x7f, 0x69, 0xb7, 0xbb, 0x49, 0x02, 0x8f, 0x44, 0x24, 0xbc, 0xb4, 0x47, 0xbc, - 0xa6, 0x1f, 0x5c, 0x12, 0x00, 0xa7, 0xe3, 0x5e, 0x6a, 0xf8, 0x01, 0xb9, 0xb4, 0x77, 0xf9, 0xd2, - 0x36, 0xf1, 0x48, 0xe0, 0x44, 0xa4, 0x39, 0xdf, 0x09, 0xfc, 0xc8, 0x47, 0x88, 0xe3, 0xcc, 0x3b, - 0x1d, 0x77, 0x9e, 0xe2, 0xcc, 0xef, 0x5d, 0x9e, 0x7d, 0x76, 0xdb, 0x8d, 0x76, 0xba, 0x9b, 0xf3, - 0x0d, 0xbf, 0x7d, 0x69, 0xdb, 0xdf, 0xf6, 0x2f, 0x31, 0xd4, 0xcd, 0xee, 0x16, 0xfb, 0xc7, 0xfe, - 0xb0, 0x5f, 0x9c, 0xc4, 0xec, 0x0b, 0x71, 0x33, 0x6d, 0xa7, 0xb1, 0xe3, 0x7a, 0x24, 0xd8, 0xbf, - 0xd4, 0xd9, 0xdd, 0x66, 0xed, 0x06, 0x24, 0xf4, 0xbb, 0x41, 0x83, 0x24, 0x1b, 0x2e, 0xac, 0x15, - 0x5e, 0x6a, 0x93, 0xc8, 0xc9, 0xe8, 0xee, 0xec, 0xa5, 0xbc, 0x5a, 0x41, 0xd7, 0x8b, 0xdc, 0x76, - 0xba, 0x99, 0x0f, 0xf6, 0xaa, 0x10, 0x36, 0x76, 0x48, 0xdb, 0x49, 0xd5, 0x7b, 0x3e, 0xaf, 0x5e, - 0x37, 0x72, 0x5b, 0x97, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0x9a, 0x05, 0xe7, 0x17, - 0x6e, 0xd7, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0xc5, 0x96, 0xdf, 0xd8, 0xad, 0x47, 0x7e, 0x40, - 0x6e, 0xf9, 0xad, 0x6e, 0x9b, 0xd4, 0xd9, 0x40, 0xa0, 0x67, 0x60, 0x64, 0x8f, 0xfd, 0x5f, 0xad, - 0xce, 0x58, 0xe7, 0xad, 0x8b, 0x95, 0xc5, 0xa9, 0x5f, 0x3b, 0x98, 0x7b, 0xcf, 0xbd, 0x83, 0xb9, - 0x91, 0x5b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x01, 0x86, 0xb6, 0xc2, 0x8d, 0xfd, 0x0e, 0x99, 0x29, - 0x31, 0xdc, 0x09, 0x81, 0x3b, 0xb4, 0x52, 0xa7, 0xa5, 0x58, 0x40, 0xd1, 0x25, 0xa8, 0x74, 0x9c, - 0x20, 0x72, 0x23, 0xd7, 0xf7, 0x66, 0xca, 0xe7, 0xad, 0x8b, 0x83, 0x8b, 0xd3, 0x02, 0xb5, 0x52, - 0x93, 0x00, 0x1c, 0xe3, 0xd0, 0x6e, 0x04, 0xc4, 0x69, 0xde, 0xf0, 0x5a, 0xfb, 0x33, 0x03, 0xe7, - 0xad, 0x8b, 0x23, 0x71, 0x37, 0xb0, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0x62, 0x09, 0x46, 0x16, 0xb6, - 0xb6, 0x5c, 0xcf, 0x8d, 0xf6, 0xd1, 0x2d, 0x18, 0xf3, 0xfc, 0x26, 0x91, 0xff, 0xd9, 0x57, 0x8c, - 0x3e, 0x77, 0x7e, 0x3e, 0xbd, 0x94, 0xe6, 0xd7, 0x35, 0xbc, 0xc5, 0xa9, 0x7b, 0x07, 0x73, 0x63, - 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0xa3, 0x1d, 0xbf, 0xa9, 0xc8, 0x96, 0x18, 0xd9, 0xb9, 0x2c, - 0xb2, 0xb5, 0x18, 0x6d, 0x71, 0xf2, 0xde, 0xc1, 0xdc, 0xa8, 0x56, 0x80, 0x75, 0x22, 0x68, 0x13, - 0x26, 0xe9, 0x5f, 0x2f, 0x72, 0x15, 0xdd, 0x32, 0xa3, 0xfb, 0x44, 0x1e, 0x5d, 0x0d, 0x75, 0xf1, - 0xc4, 0xbd, 0x83, 0xb9, 0xc9, 0x44, 0x21, 0x4e, 0x12, 0xb4, 0xdf, 0x82, 0x89, 0x85, 0x28, 0x72, - 0x1a, 0x3b, 0xa4, 0xc9, 0x67, 0x10, 0xbd, 0x00, 0x03, 0x9e, 0xd3, 0x26, 0x62, 0x7e, 0xcf, 0x8b, - 0x81, 0x1d, 0x58, 0x77, 0xda, 0xe4, 0xfe, 0xc1, 0xdc, 0xd4, 0x4d, 0xcf, 0x7d, 0xb3, 0x2b, 0x56, - 0x05, 0x2d, 0xc3, 0x0c, 0x1b, 0x3d, 0x07, 0xd0, 0x24, 0x7b, 0x6e, 0x83, 0xd4, 0x9c, 0x68, 0x47, - 0xcc, 0x37, 0x12, 0x75, 0xa1, 0xaa, 0x20, 0x58, 0xc3, 0xb2, 0xef, 0x42, 0x65, 0x61, 0xcf, 0x77, - 0x9b, 0x35, 0xbf, 0x19, 0xa2, 0x5d, 0x98, 0xec, 0x04, 0x64, 0x8b, 0x04, 0xaa, 0x68, 0xc6, 0x3a, - 0x5f, 0xbe, 0x38, 0xfa, 0xdc, 0xc5, 0xcc, 0x8f, 0x35, 0x51, 0x97, 0xbd, 0x28, 0xd8, 0x5f, 0x7c, - 0x44, 0xb4, 0x37, 0x99, 0x80, 0xe2, 0x24, 0x65, 0xfb, 0x5f, 0x96, 0xe0, 0xd4, 0xc2, 0x5b, 0xdd, - 0x80, 0x54, 0xdd, 0x70, 0x37, 0xb9, 0xc2, 0x9b, 0x6e, 0xb8, 0xbb, 0x1e, 0x8f, 0x80, 0x5a, 0x5a, - 0x55, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x16, 0x86, 0xe9, 0xef, 0x9b, 0x78, 0x55, 0x7c, 0xf2, 0x09, - 0x81, 0x3c, 0x5a, 0x75, 0x22, 0xa7, 0xca, 0x41, 0x58, 0xe2, 0xa0, 0x35, 0x18, 0x6d, 0xb0, 0x0d, - 0xb9, 0xbd, 0xe6, 0x37, 0x09, 0x9b, 
0xcc, 0xca, 0xe2, 0xd3, 0x14, 0x7d, 0x29, 0x2e, 0xbe, 0x7f, - 0x30, 0x37, 0xc3, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0x5f, 0x03, 0x8c, - 0x12, 0x64, 0xec, 0xad, 0x8b, 0xda, 0x56, 0x19, 0x64, 0x5b, 0x65, 0x2c, 0x7b, 0x9b, 0xa0, 0xcb, - 0x30, 0xb0, 0xeb, 0x7a, 0xcd, 0x99, 0x21, 0x46, 0xeb, 0x2c, 0x9d, 0xf3, 0x6b, 0xae, 0xd7, 0xbc, - 0x7f, 0x30, 0x37, 0x6d, 0x74, 0x87, 0x16, 0x62, 0x86, 0x6a, 0xff, 0xa9, 0x05, 0x73, 0x0c, 0xb6, - 0xe2, 0xb6, 0x48, 0x8d, 0x04, 0xa1, 0x1b, 0x46, 0xc4, 0x8b, 0x8c, 0x01, 0x7d, 0x0e, 0x20, 0x24, - 0x8d, 0x80, 0x44, 0xda, 0x90, 0xaa, 0x85, 0x51, 0x57, 0x10, 0xac, 0x61, 0xd1, 0x03, 0x21, 0xdc, - 0x71, 0x02, 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x2e, 0x01, 0x38, 0xc6, 0x31, 0x0e, 0x84, - 0x72, 0xaf, 0x03, 0x01, 0x7d, 0x04, 0x26, 0xe3, 0xc6, 0xc2, 0x8e, 0xd3, 0x90, 0x03, 0xc8, 0xb6, - 0x4c, 0xdd, 0x04, 0xe1, 0x24, 0xae, 0xfd, 0xb7, 0x2d, 0xb1, 0x78, 0xe8, 0x57, 0xbf, 0xc3, 0xbf, - 0xd5, 0xfe, 0x45, 0x0b, 0x86, 0x17, 0x5d, 0xaf, 0xe9, 0x7a, 0xdb, 0xe8, 0xd3, 0x30, 0x42, 0xef, - 0xa6, 0xa6, 0x13, 0x39, 0xe2, 0xdc, 0xfb, 0x80, 0xb6, 0xb7, 0xd4, 0x55, 0x31, 0xdf, 0xd9, 0xdd, - 0xa6, 0x05, 0xe1, 0x3c, 0xc5, 0xa6, 0xbb, 0xed, 0xc6, 0xe6, 0x67, 0x48, 0x23, 0x5a, 0x23, 0x91, - 0x13, 0x7f, 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0x86, 0x22, 0x27, 0xd8, 0x26, 0x91, 0x38, - 0x00, 0x33, 0x0f, 0x2a, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x06, 0x89, 0xaf, 0x85, 0x0d, 0x56, - 0x15, 0x0b, 0x12, 0xf6, 0x8f, 0x0c, 0xc3, 0x99, 0xa5, 0xfa, 0x6a, 0xce, 0xba, 0xba, 0x00, 0x43, - 0xcd, 0xc0, 0xdd, 0x23, 0x81, 0x18, 0x67, 0x45, 0xa5, 0xca, 0x4a, 0xb1, 0x80, 0xa2, 0x97, 0x60, - 0x8c, 0x5f, 0x48, 0x57, 0x1d, 0xaf, 0xd9, 0x92, 0x43, 0x7c, 0x52, 0x60, 0x8f, 0xdd, 0xd2, 0x60, - 0xd8, 0xc0, 0x3c, 0xe4, 0xa2, 0xba, 0x90, 0xd8, 0x8c, 0x79, 0x97, 0xdd, 0xe7, 0x2d, 0x98, 0xe2, - 0xcd, 0x2c, 0x44, 0x51, 0xe0, 0x6e, 0x76, 0x23, 0x12, 0xce, 0x0c, 0xb2, 0x93, 0x6e, 0x29, 0x6b, - 0xb4, 0x72, 0x47, 0x60, 0xfe, 0x56, 0x82, 0x0a, 0x3f, 0x04, 0x67, 0x44, 0xbb, 0x53, 0x49, 0x30, - 0x4e, 0x35, 0x8b, 0xbe, 0xdb, 0x82, 0xd9, 0x86, 0xef, 0x45, 0x81, 0xdf, 0x6a, 0x91, 0xa0, 0xd6, - 0xdd, 0x6c, 0xb9, 0xe1, 0x0e, 0x5f, 0xa7, 0x98, 0x6c, 0xb1, 0x93, 0x20, 0x67, 0x0e, 0x15, 0x92, - 0x98, 0xc3, 0x73, 0xf7, 0x0e, 0xe6, 0x66, 0x97, 0x72, 0x49, 0xe1, 0x82, 0x66, 0xd0, 0x2e, 0x20, - 0x7a, 0x95, 0xd6, 0x23, 0x67, 0x9b, 0xc4, 0x8d, 0x0f, 0xf7, 0xdf, 0xf8, 0xe9, 0x7b, 0x07, 0x73, - 0x68, 0x3d, 0x45, 0x02, 0x67, 0x90, 0x45, 0x6f, 0xc2, 0x49, 0x5a, 0x9a, 0xfa, 0xd6, 0x91, 0xfe, - 0x9b, 0x9b, 0xb9, 0x77, 0x30, 0x77, 0x72, 0x3d, 0x83, 0x08, 0xce, 0x24, 0x8d, 0xbe, 0xcb, 0x82, - 0x33, 0xf1, 0xe7, 0x2f, 0xdf, 0xed, 0x38, 0x5e, 0x33, 0x6e, 0xb8, 0xd2, 0x7f, 0xc3, 0xf4, 0x4c, - 0x3e, 0xb3, 0x94, 0x47, 0x09, 0xe7, 0x37, 0x32, 0xbb, 0x04, 0xa7, 0x32, 0x57, 0x0b, 0x9a, 0x82, - 0xf2, 0x2e, 0xe1, 0x5c, 0x50, 0x05, 0xd3, 0x9f, 0xe8, 0x24, 0x0c, 0xee, 0x39, 0xad, 0xae, 0xd8, - 0x28, 0x98, 0xff, 0x79, 0xb9, 0xf4, 0x92, 0x65, 0xff, 0xab, 0x32, 0x4c, 0x2e, 0xd5, 0x57, 0x1f, - 0x68, 0x17, 0xea, 0xd7, 0x50, 0xa9, 0xf0, 0x1a, 0x8a, 0x2f, 0xb5, 0x72, 0xee, 0xa5, 0xf6, 0xff, - 0x67, 0x6c, 0xa1, 0x01, 0xb6, 0x85, 0xbe, 0x2d, 0x67, 0x0b, 0x1d, 0xf1, 0xc6, 0xd9, 0xcb, 0x59, - 0x45, 0x83, 0x6c, 0x32, 0x33, 0x39, 0x96, 0xeb, 0x7e, 0xc3, 0x69, 0x25, 0x8f, 0xbe, 0x43, 0x2e, - 0xa5, 0xa3, 0x99, 0xc7, 0x06, 0x8c, 0x2d, 0x39, 0x1d, 0x67, 0xd3, 0x6d, 0xb9, 0x91, 0x4b, 0x42, - 0xf4, 0x24, 0x94, 0x9d, 0x66, 0x93, 0x71, 0x5b, 0x95, 0xc5, 0x53, 0xf7, 0x0e, 0xe6, 0xca, 0x0b, - 0x4d, 0x7a, 0xed, 0x83, 0xc2, 0xda, 0xc7, 0x14, 0x03, 0xbd, 
0x1f, 0x06, 0x9a, 0x81, 0xdf, 0x99, - 0x29, 0x31, 0x4c, 0xba, 0xeb, 0x06, 0xaa, 0x81, 0xdf, 0x49, 0xa0, 0x32, 0x1c, 0xfb, 0x57, 0x4b, - 0xf0, 0xd8, 0x12, 0xe9, 0xec, 0xac, 0xd4, 0x73, 0xce, 0xef, 0x8b, 0x30, 0xd2, 0xf6, 0x3d, 0x37, - 0xf2, 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x9a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0xc3, 0x40, 0x27, - 0x66, 0x2a, 0xc7, 0x24, 0x43, 0xca, 0xd8, 0x49, 0x06, 0xa1, 0x18, 0xdd, 0x90, 0x04, 0x62, 0xc5, - 0x28, 0x8c, 0x9b, 0x21, 0x09, 0x30, 0x83, 0xc4, 0x37, 0x33, 0xbd, 0xb3, 0xc5, 0x09, 0x9d, 0xb8, - 0x99, 0x29, 0x04, 0x6b, 0x58, 0xa8, 0x06, 0x95, 0x30, 0x31, 0xb3, 0x7d, 0x6d, 0xd3, 0x71, 0x76, - 0x75, 0xab, 0x99, 0x8c, 0x89, 0x18, 0x37, 0xca, 0x50, 0xcf, 0xab, 0xfb, 0xab, 0x25, 0x40, 0x7c, - 0x08, 0xbf, 0xc9, 0x06, 0xee, 0x66, 0x7a, 0xe0, 0xfa, 0xdf, 0x12, 0x47, 0x35, 0x7a, 0xff, 0xd3, - 0x82, 0xc7, 0x96, 0x5c, 0xaf, 0x49, 0x82, 0x9c, 0x05, 0xf8, 0x70, 0xde, 0xb2, 0x87, 0x63, 0x1a, - 0x8c, 0x25, 0x36, 0x70, 0x04, 0x4b, 0xcc, 0xfe, 0x63, 0x0b, 0x10, 0xff, 0xec, 0x77, 0xdc, 0xc7, - 0xde, 0x4c, 0x7f, 0xec, 0x11, 0x2c, 0x0b, 0xfb, 0x3a, 0x4c, 0x2c, 0xb5, 0x5c, 0xe2, 0x45, 0xab, - 0xb5, 0x25, 0xdf, 0xdb, 0x72, 0xb7, 0xd1, 0xcb, 0x30, 0x11, 0xb9, 0x6d, 0xe2, 0x77, 0xa3, 0x3a, - 0x69, 0xf8, 0x1e, 0x7b, 0x49, 0x5a, 0x17, 0x07, 0x17, 0xd1, 0xbd, 0x83, 0xb9, 0x89, 0x0d, 0x03, - 0x82, 0x13, 0x98, 0xf6, 0xef, 0xd2, 0xf1, 0xf3, 0xdb, 0x1d, 0xdf, 0x23, 0x5e, 0xb4, 0xe4, 0x7b, - 0x4d, 0x2e, 0x71, 0x78, 0x19, 0x06, 0x22, 0x3a, 0x1e, 0x7c, 0xec, 0x2e, 0xc8, 0x8d, 0x42, 0x47, - 0xe1, 0xfe, 0xc1, 0xdc, 0xe9, 0x74, 0x0d, 0x36, 0x4e, 0xac, 0x0e, 0xfa, 0x36, 0x18, 0x0a, 0x23, - 0x27, 0xea, 0x86, 0x62, 0x34, 0x1f, 0x97, 0xa3, 0x59, 0x67, 0xa5, 0xf7, 0x0f, 0xe6, 0x26, 0x55, - 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0x0c, 0xb7, 0x49, 0x18, 0x3a, 0xdb, 0xf2, 0x36, 0x9c, - 0x14, 0x75, 0x87, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x04, 0x0c, 0x92, 0x20, 0xf0, 0x03, 0xb1, - 0x47, 0xc7, 0x05, 0xe2, 0xe0, 0x32, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x86, 0x05, 0x93, 0xaa, 0xaf, - 0xbc, 0xad, 0x63, 0x78, 0x15, 0x7c, 0x02, 0xa0, 0x21, 0x3f, 0x30, 0x64, 0xb7, 0xc7, 0xe8, 0x73, - 0x17, 0x32, 0x2f, 0xea, 0xd4, 0x30, 0xc6, 0x94, 0x55, 0x51, 0x88, 0x35, 0x6a, 0xf6, 0x3f, 0xb1, - 0xe0, 0x44, 0xe2, 0x8b, 0xae, 0xbb, 0x61, 0x84, 0xde, 0x48, 0x7d, 0xd5, 0x7c, 0x7f, 0x5f, 0x45, - 0x6b, 0xb3, 0x6f, 0x52, 0x4b, 0x59, 0x96, 0x68, 0x5f, 0x74, 0x15, 0x06, 0xdd, 0x88, 0xb4, 0xe5, - 0xc7, 0x3c, 0x51, 0xf8, 0x31, 0xbc, 0x57, 0xf1, 0x8c, 0xac, 0xd2, 0x9a, 0x98, 0x13, 0xb0, 0x7f, - 0xb5, 0x0c, 0x15, 0xbe, 0x6c, 0xd7, 0x9c, 0xce, 0x31, 0xcc, 0xc5, 0xd3, 0x50, 0x71, 0xdb, 0xed, - 0x6e, 0xe4, 0x6c, 0x8a, 0xe3, 0x7c, 0x84, 0x6f, 0xad, 0x55, 0x59, 0x88, 0x63, 0x38, 0x5a, 0x85, - 0x01, 0xd6, 0x15, 0xfe, 0x95, 0x4f, 0x66, 0x7f, 0xa5, 0xe8, 0xfb, 0x7c, 0xd5, 0x89, 0x1c, 0xce, - 0x49, 0xa9, 0x7b, 0x84, 0x16, 0x61, 0x46, 0x02, 0x39, 0x00, 0x9b, 0xae, 0xe7, 0x04, 0xfb, 0xb4, - 0x6c, 0xa6, 0xcc, 0x08, 0x3e, 0x5b, 0x4c, 0x70, 0x51, 0xe1, 0x73, 0xb2, 0xea, 0xc3, 0x62, 0x00, - 0xd6, 0x88, 0xce, 0x7e, 0x08, 0x2a, 0x0a, 0xf9, 0x30, 0x0c, 0xd1, 0xec, 0x47, 0x60, 0x32, 0xd1, - 0x56, 0xaf, 0xea, 0x63, 0x3a, 0x3f, 0xf5, 0x4b, 0xec, 0xc8, 0x10, 0xbd, 0x5e, 0xf6, 0xf6, 0xc4, - 0x91, 0xfb, 0x16, 0x9c, 0x6c, 0x65, 0x9c, 0x64, 0x62, 0x5e, 0xfb, 0x3f, 0xf9, 0x1e, 0x13, 0x9f, - 0x7d, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0xa0, 0x3c, 0x82, 0xdf, 0xa1, 0x1b, 0xc4, 0x69, 0xe9, 0xec, - 0xf6, 0x0d, 0x51, 0x86, 0x15, 0x94, 0x9e, 0x77, 0x27, 0x55, 0xe7, 0xaf, 0x91, 0xfd, 0x3a, 0x69, - 0x91, 0x46, 0xe4, 0x07, 0xdf, 0xd0, 0xee, 0x9f, 0xe5, 0xa3, 0xcf, 0x8f, 0xcb, 0x51, 
0x41, 0xa0, - 0x7c, 0x8d, 0xec, 0xf3, 0xa9, 0xd0, 0xbf, 0xae, 0x5c, 0xf8, 0x75, 0x3f, 0x6b, 0xc1, 0xb8, 0xfa, - 0xba, 0x63, 0x38, 0x17, 0x16, 0xcd, 0x73, 0xe1, 0x6c, 0xe1, 0x02, 0xcf, 0x39, 0x11, 0xbe, 0x5a, - 0x82, 0x33, 0x0a, 0x87, 0xbe, 0x0d, 0xf8, 0x1f, 0xb1, 0xaa, 0x2e, 0x41, 0xc5, 0x53, 0x52, 0x2b, - 0xcb, 0x14, 0x17, 0xc5, 0x32, 0xab, 0x18, 0x87, 0xb2, 0x78, 0x5e, 0x2c, 0x5a, 0x1a, 0xd3, 0xc5, - 0xb9, 0x42, 0x74, 0xbb, 0x08, 0xe5, 0xae, 0xdb, 0x14, 0x17, 0xcc, 0x07, 0xe4, 0x68, 0xdf, 0x5c, - 0xad, 0xde, 0x3f, 0x98, 0x7b, 0x3c, 0x4f, 0x95, 0x40, 0x6f, 0xb6, 0x70, 0xfe, 0xe6, 0x6a, 0x15, - 0xd3, 0xca, 0x68, 0x01, 0x26, 0xa5, 0xb6, 0xe4, 0x16, 0x65, 0xb7, 0x7c, 0x4f, 0xdc, 0x43, 0x4a, - 0x26, 0x8b, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, 0x2a, 0x4c, 0xed, 0x76, 0x37, 0x49, 0x8b, 0x44, 0xfc, - 0x83, 0xaf, 0x11, 0x2e, 0xb1, 0xac, 0xc4, 0x2f, 0xb3, 0x6b, 0x09, 0x38, 0x4e, 0xd5, 0xb0, 0xff, - 0x9c, 0xdd, 0x07, 0x62, 0xf4, 0x6a, 0x81, 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0x8d, 0x5c, 0xce, 0xfd, - 0xac, 0x8a, 0x6b, 0x64, 0x7f, 0xc3, 0xa7, 0x9c, 0x79, 0xf6, 0xaa, 0x30, 0xd6, 0xfc, 0x40, 0xe1, - 0x9a, 0xff, 0xb9, 0x12, 0x9c, 0x52, 0x23, 0x60, 0x30, 0x81, 0xdf, 0xec, 0x63, 0x70, 0x19, 0x46, - 0x9b, 0x64, 0xcb, 0xe9, 0xb6, 0x22, 0x25, 0x3e, 0x1f, 0xe4, 0x2a, 0x94, 0x6a, 0x5c, 0x8c, 0x75, - 0x9c, 0x43, 0x0c, 0xdb, 0xff, 0x1a, 0x65, 0x17, 0x71, 0xe4, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, - 0xbb, 0x6b, 0x9e, 0x80, 0x41, 0xb7, 0x4d, 0x19, 0xb3, 0x92, 0xc9, 0x6f, 0xad, 0xd2, 0x42, 0xcc, - 0x61, 0xe8, 0x7d, 0x30, 0xdc, 0xf0, 0xdb, 0x6d, 0xc7, 0x6b, 0xb2, 0x2b, 0xaf, 0xb2, 0x38, 0x4a, - 0x79, 0xb7, 0x25, 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x03, 0x4e, 0xb0, 0xcd, 0x65, 0x18, 0x95, - 0xc5, 0x11, 0xda, 0xd2, 0x42, 0xb0, 0x1d, 0x62, 0x56, 0x4a, 0x9f, 0x60, 0x77, 0xfc, 0x60, 0xd7, - 0xf5, 0xb6, 0xab, 0x6e, 0x20, 0xb6, 0x84, 0xba, 0x0b, 0x6f, 0x2b, 0x08, 0xd6, 0xb0, 0xd0, 0x0a, - 0x0c, 0x76, 0xfc, 0x20, 0x0a, 0x67, 0x86, 0xd8, 0x70, 0x3f, 0x9e, 0x73, 0x10, 0xf1, 0xaf, 0xad, - 0xf9, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1d, 0x5d, 0x87, 0x61, 0xe2, 0xed, 0xad, - 0x04, 0x7e, 0x7b, 0xe6, 0x44, 0x3e, 0xa5, 0x65, 0x8e, 0xc2, 0x97, 0x59, 0xcc, 0xa3, 0x8a, 0x62, - 0x2c, 0x49, 0xa0, 0x6f, 0x83, 0x32, 0xf1, 0xf6, 0x66, 0x86, 0x19, 0xa5, 0xd9, 0x1c, 0x4a, 0xb7, - 0x9c, 0x20, 0x3e, 0xf3, 0x97, 0xbd, 0x3d, 0x4c, 0xeb, 0xa0, 0x8f, 0x43, 0x45, 0x1e, 0x18, 0xa1, - 0x10, 0xd6, 0x65, 0x2e, 0x58, 0x79, 0xcc, 0x60, 0xf2, 0x66, 0xd7, 0x0d, 0x48, 0x9b, 0x78, 0x51, - 0x18, 0x9f, 0x90, 0x12, 0x1a, 0xe2, 0x98, 0x1a, 0xfa, 0xb8, 0x94, 0x10, 0xaf, 0xf9, 0x5d, 0x2f, - 0x0a, 0x67, 0x2a, 0xac, 0x7b, 0x99, 0xba, 0xbb, 0x5b, 0x31, 0x5e, 0x52, 0x84, 0xcc, 0x2b, 0x63, - 0x83, 0x14, 0xfa, 0x24, 0x8c, 0xf3, 0xff, 0x5c, 0x03, 0x16, 0xce, 0x9c, 0x62, 0xb4, 0xcf, 0xe7, - 0xd3, 0xe6, 0x88, 0x8b, 0xa7, 0x04, 0xf1, 0x71, 0xbd, 0x34, 0xc4, 0x26, 0x35, 0x84, 0x61, 0xbc, - 0xe5, 0xee, 0x11, 0x8f, 0x84, 0x61, 0x2d, 0xf0, 0x37, 0xc9, 0x0c, 0xb0, 0x81, 0x39, 0x93, 0xad, - 0x31, 0xf3, 0x37, 0xc9, 0xe2, 0x34, 0xa5, 0x79, 0x5d, 0xaf, 0x83, 0x4d, 0x12, 0xe8, 0x26, 0x4c, - 0xd0, 0x17, 0x9b, 0x1b, 0x13, 0x1d, 0xed, 0x45, 0x94, 0xbd, 0xab, 0xb0, 0x51, 0x09, 0x27, 0x88, - 0xa0, 0x1b, 0x30, 0x16, 0x46, 0x4e, 0x10, 0x75, 0x3b, 0x9c, 0xe8, 0xe9, 0x5e, 0x44, 0x99, 0xc2, - 0xb5, 0xae, 0x55, 0xc1, 0x06, 0x01, 0xf4, 0x1a, 0x54, 0x5a, 0xee, 0x16, 0x69, 0xec, 0x37, 0x5a, - 0x64, 0x66, 0x8c, 0x51, 0xcb, 0x3c, 0x54, 0xae, 0x4b, 0x24, 0xce, 0xe7, 0xaa, 0xbf, 0x38, 0xae, - 0x8e, 0x6e, 0xc1, 0xe9, 0x88, 0x04, 0x6d, 0xd7, 0x73, 0xe8, 0x61, 0x20, 0x9e, 0x56, 0x4c, 0x91, - 0x39, 0xce, 
0x76, 0xdb, 0x39, 0x31, 0x1b, 0xa7, 0x37, 0x32, 0xb1, 0x70, 0x4e, 0x6d, 0x74, 0x17, - 0x66, 0x32, 0x20, 0x7e, 0xcb, 0x6d, 0xec, 0xcf, 0x9c, 0x64, 0x94, 0x3f, 0x2c, 0x28, 0xcf, 0x6c, - 0xe4, 0xe0, 0xdd, 0x2f, 0x80, 0xe1, 0x5c, 0xea, 0xe8, 0x06, 0x4c, 0xb2, 0x13, 0xa8, 0xd6, 0x6d, - 0xb5, 0x44, 0x83, 0x13, 0xac, 0xc1, 0xf7, 0xc9, 0xfb, 0x78, 0xd5, 0x04, 0xdf, 0x3f, 0x98, 0x83, - 0xf8, 0x1f, 0x4e, 0xd6, 0x46, 0x9b, 0x4c, 0x67, 0xd6, 0x0d, 0xdc, 0x68, 0x9f, 0x9e, 0x1b, 0xe4, - 0x6e, 0x34, 0x33, 0x59, 0x28, 0xaf, 0xd0, 0x51, 0x95, 0x62, 0x4d, 0x2f, 0xc4, 0x49, 0x82, 0xf4, - 0x48, 0x0d, 0xa3, 0xa6, 0xeb, 0xcd, 0x4c, 0xf1, 0x77, 0x89, 0x3c, 0x91, 0xea, 0xb4, 0x10, 0x73, - 0x18, 0xd3, 0x97, 0xd1, 0x1f, 0x37, 0xe8, 0xcd, 0x35, 0xcd, 0x10, 0x63, 0x7d, 0x99, 0x04, 0xe0, - 0x18, 0x87, 0x32, 0x93, 0x51, 0xb4, 0x3f, 0x83, 0x18, 0xaa, 0x3a, 0x58, 0x36, 0x36, 0x3e, 0x8e, - 0x69, 0xb9, 0xbd, 0x09, 0x13, 0xea, 0x20, 0x64, 0x63, 0x82, 0xe6, 0x60, 0x90, 0xb1, 0x4f, 0x42, - 0xba, 0x56, 0xa1, 0x5d, 0x60, 0xac, 0x15, 0xe6, 0xe5, 0xac, 0x0b, 0xee, 0x5b, 0x64, 0x71, 0x3f, - 0x22, 0xfc, 0x4d, 0x5f, 0xd6, 0xba, 0x20, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x2f, 0x67, 0x43, 0xe3, - 0xd3, 0xb6, 0x8f, 0xfb, 0xe5, 0x19, 0x18, 0xd9, 0xf1, 0xc3, 0x88, 0x62, 0xb3, 0x36, 0x06, 0x63, - 0xc6, 0xf3, 0xaa, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x02, 0xe3, 0x0d, 0xbd, 0x01, 0x71, 0x39, 0xaa, - 0x63, 0xc4, 0x68, 0x1d, 0x9b, 0xb8, 0xe8, 0x25, 0x18, 0x61, 0x36, 0x20, 0x0d, 0xbf, 0x25, 0xb8, - 0x36, 0x79, 0xc3, 0x8f, 0xd4, 0x44, 0xf9, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x02, 0x0c, 0xd1, - 0x2e, 0xac, 0xd6, 0xc4, 0xb5, 0xa4, 0x04, 0x45, 0x57, 0x59, 0x29, 0x16, 0x50, 0xfb, 0x2f, 0x95, - 0xb4, 0x51, 0xa6, 0xef, 0x61, 0x82, 0x6a, 0x30, 0x7c, 0xc7, 0x71, 0x23, 0xd7, 0xdb, 0x16, 0xfc, - 0xc7, 0x53, 0x85, 0x77, 0x14, 0xab, 0x74, 0x9b, 0x57, 0xe0, 0xb7, 0xa8, 0xf8, 0x83, 0x25, 0x19, - 0x4a, 0x31, 0xe8, 0x7a, 0x1e, 0xa5, 0x58, 0xea, 0x97, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, - 0x4b, 0x32, 0xe8, 0x0d, 0x00, 0xb9, 0xc3, 0x48, 0x53, 0xd8, 0x5e, 0x3c, 0xd3, 0x9b, 0xe8, 0x86, - 0xaa, 0xb3, 0x38, 0x41, 0xef, 0xe8, 0xf8, 0x3f, 0xd6, 0xe8, 0xd9, 0x11, 0xe3, 0xd3, 0xd2, 0x9d, - 0x41, 0xdf, 0x41, 0x97, 0xb8, 0x13, 0x44, 0xa4, 0xb9, 0x10, 0x89, 0xc1, 0x79, 0x7f, 0x7f, 0x8f, - 0x94, 0x0d, 0xb7, 0x4d, 0xf4, 0xed, 0x20, 0x88, 0xe0, 0x98, 0x9e, 0xfd, 0x0b, 0x65, 0x98, 0xc9, - 0xeb, 0x2e, 0x5d, 0x74, 0xe4, 0xae, 0x1b, 0x2d, 0x51, 0xf6, 0xca, 0x32, 0x17, 0xdd, 0xb2, 0x28, - 0xc7, 0x0a, 0x83, 0xce, 0x7e, 0xe8, 0x6e, 0xcb, 0x37, 0xe6, 0x60, 0x3c, 0xfb, 0x75, 0x56, 0x8a, - 0x05, 0x94, 0xe2, 0x05, 0xc4, 0x09, 0x85, 0x71, 0x8f, 0xb6, 0x4a, 0x30, 0x2b, 0xc5, 0x02, 0xaa, - 0x4b, 0xbb, 0x06, 0x7a, 0x48, 0xbb, 0x8c, 0x21, 0x1a, 0x3c, 0xda, 0x21, 0x42, 0x9f, 0x02, 0xd8, - 0x72, 0x3d, 0x37, 0xdc, 0x61, 0xd4, 0x87, 0x0e, 0x4d, 0x5d, 0x31, 0x67, 0x2b, 0x8a, 0x0a, 0xd6, - 0x28, 0xa2, 0x17, 0x61, 0x54, 0x6d, 0xc0, 0xd5, 0x2a, 0xd3, 0x74, 0x6a, 0x96, 0x23, 0xf1, 0x69, - 0x54, 0xc5, 0x3a, 0x9e, 0xfd, 0x99, 0xe4, 0x7a, 0x11, 0x3b, 0x40, 0x1b, 0x5f, 0xab, 0xdf, 0xf1, - 0x2d, 0x15, 0x8f, 0xaf, 0xfd, 0xf5, 0x32, 0x4c, 0x1a, 0x8d, 0x75, 0xc3, 0x3e, 0xce, 0xac, 0x2b, - 0xf4, 0x00, 0x77, 0x22, 0x22, 0xf6, 0x9f, 0xdd, 0x7b, 0xab, 0xe8, 0x87, 0x3c, 0xdd, 0x01, 0xbc, - 0x3e, 0xfa, 0x14, 0x54, 0x5a, 0x4e, 0xc8, 0x24, 0x67, 0x44, 0xec, 0xbb, 0x7e, 0x88, 0xc5, 0x0f, - 0x13, 0x27, 0x8c, 0xb4, 0x5b, 0x93, 0xd3, 0x8e, 0x49, 0xd2, 0x9b, 0x86, 0xf2, 0x27, 0xd2, 0x7a, - 0x4c, 0x75, 0x82, 0x32, 0x31, 0xfb, 0x98, 0xc3, 0xd0, 0x4b, 0x30, 0x16, 0x10, 0xb6, 0x2a, 0x96, - 0x28, 0x37, 0xc7, 0x96, 0xd9, 0x60, 
0xcc, 0xf6, 0x61, 0x0d, 0x86, 0x0d, 0xcc, 0xf8, 0x6d, 0x30, - 0x54, 0xf0, 0x36, 0x78, 0x0a, 0x86, 0xd9, 0x0f, 0xb5, 0x02, 0xd4, 0x6c, 0xac, 0xf2, 0x62, 0x2c, - 0xe1, 0xc9, 0x05, 0x33, 0xd2, 0xdf, 0x82, 0xa1, 0xaf, 0x0f, 0xb1, 0xa8, 0x99, 0x96, 0x79, 0x84, - 0x9f, 0x72, 0x62, 0xc9, 0x63, 0x09, 0xb3, 0xdf, 0x0f, 0x13, 0x55, 0x87, 0xb4, 0x7d, 0x6f, 0xd9, - 0x6b, 0x76, 0x7c, 0xd7, 0x8b, 0xd0, 0x0c, 0x0c, 0xb0, 0x4b, 0x84, 0x1f, 0x01, 0x03, 0xb4, 0x21, - 0xcc, 0x4a, 0xec, 0x6d, 0x38, 0x55, 0xf5, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0xb9, 0x50, 0x5b, 0xd5, - 0xde, 0xd7, 0xeb, 0xf2, 0x7d, 0xc7, 0x8d, 0xb6, 0x32, 0x8f, 0x5e, 0xad, 0x26, 0x67, 0x6b, 0x57, - 0xdc, 0x16, 0xc9, 0x91, 0x82, 0xfc, 0xd5, 0x92, 0xd1, 0x52, 0x8c, 0xaf, 0xb4, 0x5a, 0x56, 0xae, - 0x56, 0xeb, 0x75, 0x18, 0xd9, 0x72, 0x49, 0xab, 0x89, 0xc9, 0x96, 0x58, 0x89, 0x4f, 0xe6, 0xdb, - 0xa1, 0xac, 0x50, 0x4c, 0x29, 0xf5, 0xe2, 0xaf, 0xc3, 0x15, 0x51, 0x19, 0x2b, 0x32, 0x68, 0x17, - 0xa6, 0xe4, 0x83, 0x41, 0x42, 0xc5, 0xba, 0x7c, 0xaa, 0xe8, 0x15, 0x62, 0x12, 0x3f, 0x79, 0xef, - 0x60, 0x6e, 0x0a, 0x27, 0xc8, 0xe0, 0x14, 0x61, 0xfa, 0x1c, 0x6c, 0xd3, 0x13, 0x78, 0x80, 0x0d, - 0x3f, 0x7b, 0x0e, 0xb2, 0x97, 0x2d, 0x2b, 0xb5, 0x7f, 0xdc, 0x82, 0x47, 0x52, 0x23, 0x23, 0x5e, - 0xf8, 0x47, 0x3c, 0x0b, 0xc9, 0x17, 0x77, 0xa9, 0xf7, 0x8b, 0xdb, 0xfe, 0x3b, 0x16, 0x9c, 0x5c, - 0x6e, 0x77, 0xa2, 0xfd, 0xaa, 0x6b, 0xaa, 0xa0, 0x3e, 0x04, 0x43, 0x6d, 0xd2, 0x74, 0xbb, 0x6d, - 0x31, 0x73, 0x73, 0xf2, 0x94, 0x5a, 0x63, 0xa5, 0xf7, 0x0f, 0xe6, 0xc6, 0xeb, 0x91, 0x1f, 0x38, - 0xdb, 0x84, 0x17, 0x60, 0x81, 0xce, 0xce, 0x7a, 0xf7, 0x2d, 0x72, 0xdd, 0x6d, 0xbb, 0xd2, 0xae, - 0xa8, 0x50, 0x66, 0x37, 0x2f, 0x07, 0x74, 0xfe, 0xf5, 0xae, 0xe3, 0x45, 0x6e, 0xb4, 0x2f, 0xb4, - 0x47, 0x92, 0x08, 0x8e, 0xe9, 0xd9, 0x5f, 0xb3, 0x60, 0x52, 0xae, 0xfb, 0x85, 0x66, 0x33, 0x20, - 0x61, 0x88, 0x66, 0xa1, 0xe4, 0x76, 0x44, 0x2f, 0x41, 0xf4, 0xb2, 0xb4, 0x5a, 0xc3, 0x25, 0xb7, - 0x23, 0xd9, 0x32, 0x76, 0x10, 0x96, 0x4d, 0x45, 0xda, 0x55, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x08, - 0x23, 0x9e, 0xdf, 0xe4, 0xb6, 0x5d, 0xfc, 0x4a, 0x63, 0x0b, 0x6c, 0x5d, 0x94, 0x61, 0x05, 0x45, - 0x35, 0xa8, 0x70, 0xb3, 0xa7, 0x78, 0xd1, 0xf6, 0x65, 0x3c, 0xc5, 0xbe, 0x6c, 0x43, 0xd6, 0xc4, - 0x31, 0x11, 0xfb, 0x57, 0x2c, 0x18, 0x93, 0x5f, 0xd6, 0x27, 0xcf, 0x49, 0xb7, 0x56, 0xcc, 0x6f, - 0xc6, 0x5b, 0x8b, 0xf2, 0x8c, 0x0c, 0x62, 0xb0, 0x8a, 0xe5, 0x43, 0xb1, 0x8a, 0x97, 0x61, 0xd4, - 0xe9, 0x74, 0x6a, 0x26, 0x9f, 0xc9, 0x96, 0xd2, 0x42, 0x5c, 0x8c, 0x75, 0x1c, 0xfb, 0xc7, 0x4a, - 0x30, 0x21, 0xbf, 0xa0, 0xde, 0xdd, 0x0c, 0x49, 0x84, 0x36, 0xa0, 0xe2, 0xf0, 0x59, 0x22, 0x72, - 0x91, 0x3f, 0x91, 0x2d, 0x47, 0x30, 0xa6, 0x34, 0xbe, 0xf0, 0x17, 0x64, 0x6d, 0x1c, 0x13, 0x42, - 0x2d, 0x98, 0xf6, 0xfc, 0x88, 0x1d, 0xfe, 0x0a, 0x5e, 0xa4, 0xda, 0x49, 0x52, 0x3f, 0x23, 0xa8, - 0x4f, 0xaf, 0x27, 0xa9, 0xe0, 0x34, 0x61, 0xb4, 0x2c, 0x65, 0x33, 0xe5, 0x7c, 0x61, 0x80, 0x3e, - 0x71, 0xd9, 0xa2, 0x19, 0xfb, 0x97, 0x2d, 0xa8, 0x48, 0xb4, 0xe3, 0xd0, 0xe2, 0xad, 0xc1, 0x70, - 0xc8, 0x26, 0x41, 0x0e, 0x8d, 0x5d, 0xd4, 0x71, 0x3e, 0x5f, 0xf1, 0x9d, 0xc6, 0xff, 0x87, 0x58, - 0xd2, 0x60, 0xa2, 0x79, 0xd5, 0xfd, 0x77, 0x88, 0x68, 0x5e, 0xf5, 0x27, 0xe7, 0x52, 0xfa, 0x43, - 0xd6, 0x67, 0x4d, 0xd6, 0x45, 0x59, 0xaf, 0x4e, 0x40, 0xb6, 0xdc, 0xbb, 0x49, 0xd6, 0xab, 0xc6, - 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0x60, 0xac, 0x21, 0x65, 0xb2, 0xf1, 0x0e, 0xbf, 0x50, 0xa8, 0x1f, - 0x50, 0xaa, 0x24, 0x2e, 0x0b, 0x59, 0xd2, 0xea, 0x63, 0x83, 0x9a, 0x69, 0x46, 0x50, 0xee, 0x65, - 0x46, 0x10, 0xd3, 0xcd, 0x57, 0xaa, 0xff, 0x84, 0x05, 0x43, 
0x5c, 0x16, 0xd7, 0x9f, 0x28, 0x54, - 0xd3, 0xac, 0xc5, 0x63, 0x77, 0x8b, 0x16, 0x0a, 0x4d, 0x19, 0x5a, 0x83, 0x0a, 0xfb, 0xc1, 0x64, - 0x89, 0xe5, 0x7c, 0xab, 0x7b, 0xde, 0xaa, 0xde, 0xc1, 0x5b, 0xb2, 0x1a, 0x8e, 0x29, 0xd8, 0x3f, - 0x5a, 0xa6, 0xa7, 0x5b, 0x8c, 0x6a, 0x5c, 0xfa, 0xd6, 0xc3, 0xbb, 0xf4, 0x4b, 0x0f, 0xeb, 0xd2, - 0xdf, 0x86, 0xc9, 0x86, 0xa6, 0x87, 0x8b, 0x67, 0xf2, 0x62, 0xe1, 0x22, 0xd1, 0x54, 0x76, 0x5c, - 0xca, 0xb2, 0x64, 0x12, 0xc1, 0x49, 0xaa, 0xe8, 0x3b, 0x60, 0x8c, 0xcf, 0xb3, 0x68, 0x85, 0x5b, - 0x62, 0xbc, 0x2f, 0x7f, 0xbd, 0xe8, 0x4d, 0x70, 0xa9, 0x9c, 0x56, 0x1d, 0x1b, 0xc4, 0xec, 0x3f, - 0xb1, 0x00, 0x2d, 0x77, 0x76, 0x48, 0x9b, 0x04, 0x4e, 0x2b, 0x16, 0xa7, 0xff, 0xa0, 0x05, 0x33, - 0x24, 0x55, 0xbc, 0xe4, 0xb7, 0xdb, 0xe2, 0xd1, 0x92, 0xf3, 0xae, 0x5e, 0xce, 0xa9, 0xa3, 0xdc, - 0x12, 0x66, 0xf2, 0x30, 0x70, 0x6e, 0x7b, 0x68, 0x0d, 0x4e, 0xf0, 0x5b, 0x52, 0x01, 0x34, 0xdb, - 0xeb, 0x47, 0x05, 0xe1, 0x13, 0x1b, 0x69, 0x14, 0x9c, 0x55, 0xcf, 0xfe, 0x9e, 0x31, 0xc8, 0xed, - 0xc5, 0xbb, 0x7a, 0x84, 0x77, 0xf5, 0x08, 0xef, 0xea, 0x11, 0xde, 0xd5, 0x23, 0xbc, 0xab, 0x47, - 0xf8, 0x96, 0xd7, 0x23, 0xfc, 0x65, 0x0b, 0x4e, 0xa9, 0x6b, 0xc0, 0x78, 0xf8, 0x7e, 0x16, 0x4e, - 0xf0, 0xed, 0xb6, 0xd4, 0x72, 0xdc, 0xf6, 0x06, 0x69, 0x77, 0x5a, 0x4e, 0x24, 0xb5, 0xee, 0x97, - 0x33, 0x57, 0x6e, 0xc2, 0x62, 0xd5, 0xa8, 0xb8, 0xf8, 0x08, 0xbd, 0x9e, 0x32, 0x00, 0x38, 0xab, - 0x19, 0xfb, 0x17, 0x46, 0x60, 0x70, 0x79, 0x8f, 0x78, 0xd1, 0x31, 0x3c, 0x11, 0x1a, 0x30, 0xe1, - 0x7a, 0x7b, 0x7e, 0x6b, 0x8f, 0x34, 0x39, 0xfc, 0x30, 0x2f, 0xd9, 0xd3, 0x82, 0xf4, 0xc4, 0xaa, - 0x41, 0x02, 0x27, 0x48, 0x3e, 0x0c, 0x69, 0xf2, 0x15, 0x18, 0xe2, 0x87, 0xb8, 0x10, 0x25, 0x67, - 0x9e, 0xd9, 0x6c, 0x10, 0xc5, 0xd5, 0x14, 0x4b, 0xba, 0xf9, 0x25, 0x21, 0xaa, 0xa3, 0xcf, 0xc0, - 0xc4, 0x96, 0x1b, 0x84, 0xd1, 0x86, 0xdb, 0x26, 0x61, 0xe4, 0xb4, 0x3b, 0x0f, 0x20, 0x3d, 0x56, - 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x1b, 0xc6, 0x5b, 0x8e, 0xde, 0xd4, 0xf0, 0xa1, - 0x9b, 0x52, 0xb7, 0xc3, 0x75, 0x9d, 0x10, 0x36, 0xe9, 0xd2, 0xed, 0xd4, 0x60, 0x02, 0xd0, 0x11, - 0x26, 0x16, 0x50, 0xdb, 0x89, 0x4b, 0x3e, 0x39, 0x8c, 0x32, 0x3a, 0xcc, 0x40, 0xb6, 0x62, 0x32, - 0x3a, 0x9a, 0x19, 0xec, 0xa7, 0xa1, 0x42, 0xe8, 0x10, 0x52, 0xc2, 0xe2, 0x82, 0xb9, 0xd4, 0x5f, - 0x5f, 0xd7, 0xdc, 0x46, 0xe0, 0x9b, 0x72, 0xfb, 0x65, 0x49, 0x09, 0xc7, 0x44, 0xd1, 0x12, 0x0c, - 0x85, 0x24, 0x70, 0x49, 0x28, 0xae, 0x9a, 0x82, 0x69, 0x64, 0x68, 0xdc, 0xb7, 0x84, 0xff, 0xc6, - 0xa2, 0x2a, 0x5d, 0x5e, 0x0e, 0x13, 0x69, 0xb2, 0xcb, 0x40, 0x5b, 0x5e, 0x0b, 0xac, 0x14, 0x0b, - 0x28, 0x7a, 0x0d, 0x86, 0x03, 0xd2, 0x62, 0x8a, 0xa1, 0xf1, 0xfe, 0x17, 0x39, 0xd7, 0x33, 0xf1, - 0x7a, 0x58, 0x12, 0x40, 0xd7, 0x00, 0x05, 0x84, 0x32, 0x4a, 0xae, 0xb7, 0xad, 0xcc, 0x46, 0xc5, - 0x41, 0xab, 0x18, 0x52, 0x1c, 0x63, 0x48, 0x37, 0x1f, 0x9c, 0x51, 0x0d, 0x5d, 0x81, 0x69, 0x55, - 0xba, 0xea, 0x85, 0x91, 0x43, 0x0f, 0xb8, 0x49, 0x46, 0x4b, 0xc9, 0x29, 0x70, 0x12, 0x01, 0xa7, - 0xeb, 0xd8, 0x5f, 0xb2, 0x80, 0x8f, 0xf3, 0x31, 0xbc, 0xce, 0x5f, 0x35, 0x5f, 0xe7, 0x67, 0x72, - 0x67, 0x2e, 0xe7, 0x65, 0xfe, 0x25, 0x0b, 0x46, 0xb5, 0x99, 0x8d, 0xd7, 0xac, 0x55, 0xb0, 0x66, - 0xbb, 0x30, 0x45, 0x57, 0xfa, 0x8d, 0xcd, 0x90, 0x04, 0x7b, 0xa4, 0xc9, 0x16, 0x66, 0xe9, 0xc1, - 0x16, 0xa6, 0x32, 0x51, 0xbb, 0x9e, 0x20, 0x88, 0x53, 0x4d, 0xd8, 0x9f, 0x96, 0x5d, 0x55, 0x16, - 0x7d, 0x0d, 0x35, 0xe7, 0x09, 0x8b, 0x3e, 0x35, 0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xc7, 0x0f, - 0xa3, 0xa4, 0x45, 0xdf, 0x55, 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0xcf, 0x03, 0x2c, 0xdf, 
0x25, 0x0d, - 0xbe, 0x62, 0xf5, 0xc7, 0x83, 0x95, 0xff, 0x78, 0xb0, 0x7f, 0xcb, 0x82, 0x89, 0x95, 0x25, 0xe3, - 0xe6, 0x9a, 0x07, 0xe0, 0x2f, 0x9e, 0xdb, 0xb7, 0xd7, 0xa5, 0x3a, 0x9c, 0x6b, 0x34, 0x55, 0x29, - 0xd6, 0x30, 0xd0, 0x19, 0x28, 0xb7, 0xba, 0x9e, 0x10, 0x1f, 0x0e, 0xd3, 0xeb, 0xf1, 0x7a, 0xd7, - 0xc3, 0xb4, 0x4c, 0x73, 0x29, 0x28, 0xf7, 0xed, 0x52, 0xd0, 0xd3, 0xb5, 0x1f, 0xcd, 0xc1, 0xe0, - 0x9d, 0x3b, 0x6e, 0x93, 0x3b, 0x50, 0x0a, 0x55, 0xfd, 0xed, 0xdb, 0xab, 0xd5, 0x10, 0xf3, 0x72, - 0xfb, 0x0b, 0x65, 0x98, 0x5d, 0x69, 0x91, 0xbb, 0x6f, 0xd3, 0x89, 0xb4, 0x5f, 0x87, 0x88, 0xc3, - 0x09, 0x62, 0x0e, 0xeb, 0xf4, 0xd2, 0x7b, 0x3c, 0xb6, 0x60, 0x98, 0x1b, 0xb4, 0x49, 0x97, 0xd2, - 0x57, 0xb2, 0x5a, 0xcf, 0x1f, 0x90, 0x79, 0x6e, 0x18, 0x27, 0x3c, 0xe2, 0xd4, 0x85, 0x29, 0x4a, - 0xb1, 0x24, 0x3e, 0xfb, 0x32, 0x8c, 0xe9, 0x98, 0x87, 0x72, 0x3f, 0xfb, 0x0b, 0x65, 0x98, 0xa2, - 0x3d, 0x78, 0xa8, 0x13, 0x71, 0x33, 0x3d, 0x11, 0x47, 0xed, 0x82, 0xd4, 0x7b, 0x36, 0xde, 0x48, - 0xce, 0xc6, 0xe5, 0xbc, 0xd9, 0x38, 0xee, 0x39, 0xf8, 0x6e, 0x0b, 0x4e, 0xac, 0xb4, 0xfc, 0xc6, - 0x6e, 0xc2, 0x4d, 0xe8, 0x45, 0x18, 0xa5, 0xc7, 0x71, 0x68, 0x78, 0xb0, 0x1b, 0x31, 0x0d, 0x04, - 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0x9b, 0x37, 0x57, 0xab, 0x59, 0xa1, 0x10, 0x04, 0x08, 0xeb, 0x78, - 0xf6, 0xaf, 0x5b, 0x70, 0xf6, 0xca, 0xd2, 0x72, 0xbc, 0x14, 0x53, 0xd1, 0x18, 0x2e, 0xc0, 0x50, - 0xa7, 0xa9, 0x75, 0x25, 0x16, 0xaf, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0x9d, 0x12, 0x69, 0xe4, 0x26, - 0xc0, 0x15, 0x5c, 0x5b, 0x12, 0xe7, 0xae, 0xd4, 0xa6, 0x58, 0xb9, 0xda, 0x94, 0xf7, 0xc1, 0x30, - 0xbd, 0x17, 0xdc, 0x86, 0xec, 0x37, 0x57, 0xd0, 0xf2, 0x22, 0x2c, 0x61, 0xf6, 0xcf, 0x58, 0x70, - 0xe2, 0x8a, 0x1b, 0xd1, 0x4b, 0x3b, 0x19, 0x6e, 0x80, 0xde, 0xda, 0xa1, 0x1b, 0xf9, 0xc1, 0x7e, - 0x32, 0xdc, 0x00, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x0f, 0xda, 0x73, 0x99, 0x85, 0x76, 0xc9, 0xd4, - 0x5f, 0x61, 0x51, 0x8e, 0x15, 0x06, 0x1d, 0xaf, 0xa6, 0x1b, 0x30, 0xd1, 0xdf, 0xbe, 0x38, 0xb8, - 0xd5, 0x78, 0x55, 0x25, 0x00, 0xc7, 0x38, 0xf6, 0x1f, 0x59, 0x30, 0x77, 0xa5, 0xd5, 0x0d, 0x23, - 0x12, 0x6c, 0x85, 0x39, 0x87, 0xee, 0xf3, 0x50, 0x21, 0x52, 0xd0, 0x2e, 0x7a, 0xad, 0x18, 0x51, - 0x25, 0x81, 0xe7, 0x51, 0x0f, 0x14, 0x5e, 0x1f, 0xbe, 0x8c, 0x87, 0x73, 0x46, 0x5b, 0x01, 0x44, - 0xf4, 0xb6, 0xf4, 0x30, 0x10, 0xcc, 0x9f, 0x7c, 0x39, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x1f, 0xb7, - 0xe0, 0x94, 0xfa, 0xe0, 0x77, 0xdc, 0x67, 0xda, 0x5f, 0x29, 0xc1, 0xf8, 0xd5, 0x8d, 0x8d, 0xda, - 0x15, 0x12, 0x69, 0xab, 0xb2, 0x58, 0x7d, 0x8e, 0x35, 0x2d, 0x60, 0xd1, 0x1b, 0xb1, 0x1b, 0xb9, - 0xad, 0x79, 0x1e, 0x4d, 0x68, 0x7e, 0xd5, 0x8b, 0x6e, 0x04, 0xf5, 0x28, 0x70, 0xbd, 0xed, 0xcc, - 0x95, 0x2e, 0x79, 0x96, 0x72, 0x1e, 0xcf, 0x82, 0x9e, 0x87, 0x21, 0x16, 0xce, 0x48, 0x4e, 0xc2, - 0xa3, 0xea, 0x89, 0xc5, 0x4a, 0xef, 0x1f, 0xcc, 0x55, 0x6e, 0xe2, 0x55, 0xfe, 0x07, 0x0b, 0x54, - 0x74, 0x13, 0x46, 0x77, 0xa2, 0xa8, 0x73, 0x95, 0x38, 0x4d, 0x12, 0xc8, 0x53, 0xf6, 0x5c, 0xd6, - 0x29, 0x4b, 0x07, 0x81, 0xa3, 0xc5, 0x07, 0x53, 0x5c, 0x16, 0x62, 0x9d, 0x8e, 0x5d, 0x07, 0x88, - 0x61, 0x47, 0xa4, 0x00, 0xb1, 0x37, 0xa0, 0x42, 0x3f, 0x77, 0xa1, 0xe5, 0x3a, 0xc5, 0x2a, 0xe6, - 0xa7, 0xa1, 0x22, 0x15, 0xc8, 0xa1, 0xf0, 0xb5, 0x66, 0x37, 0x92, 0xd4, 0x2f, 0x87, 0x38, 0x86, - 0xdb, 0x5b, 0x70, 0x92, 0x99, 0x03, 0x3a, 0xd1, 0x8e, 0xb1, 0xfa, 0x7a, 0x4f, 0xf3, 0x33, 0xe2, - 0xc5, 0xc6, 0xfb, 0x3c, 0xa3, 0xb9, 0x33, 0x8e, 0x49, 0x8a, 0xf1, 0xeb, 0xcd, 0xfe, 0xfa, 0x00, - 0x3c, 0xba, 0x5a, 0xcf, 0x0f, 0xc7, 0xf1, 0x12, 0x8c, 0x71, 0x46, 0x90, 0x4e, 0xba, 0xd3, 0x12, - 0xed, 0x2a, 
0xd9, 0xe6, 0x86, 0x06, 0xc3, 0x06, 0x26, 0x3a, 0x0b, 0x65, 0xf7, 0x4d, 0x2f, 0xe9, - 0xec, 0xb3, 0xfa, 0xfa, 0x3a, 0xa6, 0xe5, 0x14, 0x4c, 0x79, 0x4a, 0x7e, 0x58, 0x2b, 0xb0, 0xe2, - 0x2b, 0x5f, 0x85, 0x09, 0x37, 0x6c, 0x84, 0xee, 0xaa, 0x47, 0x77, 0xa0, 0xb6, 0x87, 0x95, 0x34, - 0x81, 0x76, 0x5a, 0x41, 0x71, 0x02, 0x5b, 0xbb, 0x39, 0x06, 0xfb, 0xe6, 0x4b, 0x7b, 0x3a, 0x1f, - 0xd3, 0x83, 0xbd, 0xc3, 0xbe, 0x2e, 0x64, 0x42, 0x6a, 0x71, 0xb0, 0xf3, 0x0f, 0x0e, 0xb1, 0x84, - 0xd1, 0xa7, 0x5a, 0x63, 0xc7, 0xe9, 0x2c, 0x74, 0xa3, 0x9d, 0xaa, 0x1b, 0x36, 0xfc, 0x3d, 0x12, - 0xec, 0xb3, 0x57, 0xf6, 0x48, 0xfc, 0x54, 0x53, 0x80, 0xa5, 0xab, 0x0b, 0x35, 0x8a, 0x89, 0xd3, - 0x75, 0xd0, 0x02, 0x4c, 0xca, 0xc2, 0x3a, 0x09, 0xd9, 0xe1, 0x3e, 0xca, 0xc8, 0x28, 0xf7, 0x1b, - 0x51, 0xac, 0x88, 0x24, 0xf1, 0x4d, 0xd6, 0x15, 0x8e, 0x82, 0x75, 0xfd, 0x10, 0x8c, 0xbb, 0x9e, - 0x1b, 0xb9, 0x4e, 0xe4, 0x73, 0x0d, 0x0b, 0x7f, 0x50, 0x33, 0xd1, 0xf1, 0xaa, 0x0e, 0xc0, 0x26, - 0x9e, 0xfd, 0x5f, 0x06, 0x60, 0x9a, 0x4d, 0xdb, 0xbb, 0x2b, 0xec, 0x5b, 0x69, 0x85, 0xdd, 0x4c, - 0xaf, 0xb0, 0xa3, 0xe0, 0xc9, 0x1f, 0x78, 0x99, 0x7d, 0x06, 0x2a, 0xca, 0xe3, 0x48, 0xba, 0x1c, - 0x5a, 0x39, 0x2e, 0x87, 0xbd, 0xef, 0x65, 0x69, 0xb4, 0x55, 0xce, 0x34, 0xda, 0xfa, 0xb2, 0x05, - 0xb1, 0xca, 0x00, 0xbd, 0x0e, 0x95, 0x8e, 0xcf, 0x6c, 0x11, 0x03, 0x69, 0xe0, 0xfb, 0xde, 0x42, - 0x9d, 0x03, 0x8f, 0x48, 0x14, 0xf0, 0x51, 0xa8, 0xc9, 0xaa, 0x38, 0xa6, 0x82, 0xae, 0xc1, 0x70, - 0x27, 0x20, 0xf5, 0x88, 0x85, 0xe7, 0xe8, 0x9f, 0x20, 0x5f, 0x35, 0xbc, 0x22, 0x96, 0x14, 0xec, - 0xff, 0x6a, 0xc1, 0x54, 0x12, 0x15, 0x7d, 0x18, 0x06, 0xc8, 0x5d, 0xd2, 0x10, 0xfd, 0xcd, 0xbc, - 0x64, 0x63, 0xa1, 0x03, 0x1f, 0x00, 0xfa, 0x1f, 0xb3, 0x5a, 0xe8, 0x2a, 0x0c, 0xd3, 0x1b, 0xf6, - 0x8a, 0x0a, 0x0d, 0xf5, 0x78, 0xde, 0x2d, 0xad, 0x58, 0x15, 0xde, 0x39, 0x51, 0x84, 0x65, 0x75, - 0x66, 0x29, 0xd5, 0xe8, 0xd4, 0xe9, 0xe3, 0x25, 0x2a, 0x7a, 0x63, 0x6f, 0x2c, 0xd5, 0x38, 0x92, - 0xa0, 0xc6, 0x2d, 0xa5, 0x64, 0x21, 0x8e, 0x89, 0xd8, 0x3f, 0x67, 0x01, 0x70, 0xc3, 0x30, 0xc7, - 0xdb, 0x26, 0xc7, 0x20, 0x27, 0xaf, 0xc2, 0x40, 0xd8, 0x21, 0x8d, 0x22, 0x33, 0xd9, 0xb8, 0x3f, - 0xf5, 0x0e, 0x69, 0xc4, 0x2b, 0x8e, 0xfe, 0xc3, 0xac, 0xb6, 0xfd, 0xbd, 0x00, 0x13, 0x31, 0xda, - 0x6a, 0x44, 0xda, 0xe8, 0x59, 0x23, 0x4c, 0xc1, 0x99, 0x44, 0x98, 0x82, 0x0a, 0xc3, 0xd6, 0x44, - 0xb2, 0x9f, 0x81, 0x72, 0xdb, 0xb9, 0x2b, 0x64, 0x6e, 0x4f, 0x17, 0x77, 0x83, 0xd2, 0x9f, 0x5f, - 0x73, 0xee, 0xf2, 0x67, 0xe9, 0xd3, 0x72, 0x87, 0xac, 0x39, 0x77, 0xef, 0x73, 0x63, 0x58, 0x76, - 0x4a, 0x5f, 0x77, 0xc3, 0xe8, 0x73, 0xff, 0x39, 0xfe, 0xcf, 0xf6, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, - 0x7a, 0xc2, 0xe6, 0xa9, 0xaf, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0xf5, 0xd1, 0x96, 0xeb, 0xa1, - 0xb7, 0x60, 0x58, 0x98, 0x24, 0x8a, 0xb0, 0x40, 0x97, 0xfa, 0x68, 0x4f, 0x58, 0x34, 0xf2, 0x36, - 0x2f, 0xc9, 0x67, 0xb7, 0x28, 0xed, 0xd9, 0xae, 0x6c, 0x10, 0xfd, 0x15, 0x0b, 0x26, 0xc4, 0x6f, - 0x4c, 0xde, 0xec, 0x92, 0x30, 0x12, 0x6c, 0xe9, 0x07, 0xfb, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, - 0x41, 0x79, 0xcf, 0x98, 0xc0, 0x9e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x3d, 0x0b, 0x4e, 0xb6, 0x9d, - 0xbb, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x6a, 0xff, 0xc3, 0xfd, 0x4d, 0x7f, 0xaa, - 0x3a, 0xef, 0xa4, 0xd4, 0x3f, 0x9e, 0xcc, 0x42, 0xe9, 0xd9, 0xd5, 0xcc, 0x7e, 0xcd, 0x6e, 0xc1, - 0x88, 0x5c, 0x6f, 0x19, 0xc2, 0x8d, 0xaa, 0xce, 0x73, 0x1f, 0xda, 0x22, 0x54, 0x77, 0xff, 0xa7, - 0xed, 0x88, 0xb5, 0xf6, 0x50, 0xdb, 0xf9, 0x0c, 0x8c, 0xe9, 0x6b, 0xec, 0xa1, 0xb6, 0xf5, 0x26, - 0x9c, 0xc8, 0x58, 0x4b, 0x0f, 0xb5, 
0xc9, 0x3b, 0x70, 0x26, 0x77, 0x7d, 0x3c, 0xcc, 0x86, 0xed, - 0xaf, 0x58, 0xfa, 0x39, 0x78, 0x0c, 0xca, 0x8a, 0x25, 0x53, 0x59, 0x71, 0xae, 0x78, 0xe7, 0xe4, - 0x68, 0x2c, 0xde, 0xd0, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x1a, 0x0c, 0xb5, 0x68, 0x89, 0x34, 0x6c, - 0xb5, 0x7b, 0xef, 0xc8, 0x98, 0x99, 0x64, 0xe5, 0x21, 0x16, 0x14, 0xec, 0x5f, 0xb4, 0x60, 0xe0, - 0x18, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x6c, 0x2e, 0x69, 0x11, 0xb1, 0x78, 0x1e, 0x3b, 0x77, 0x96, - 0xef, 0x46, 0xc4, 0x0b, 0xd9, 0x8d, 0x9c, 0x39, 0x30, 0x3f, 0x65, 0xc1, 0x89, 0xeb, 0xbe, 0xd3, - 0x5c, 0x74, 0x5a, 0x8e, 0xd7, 0x20, 0xc1, 0xaa, 0xb7, 0x7d, 0x28, 0xab, 0xec, 0x52, 0x4f, 0xab, - 0xec, 0x25, 0x69, 0xd4, 0x34, 0x90, 0x3f, 0x7f, 0x94, 0x93, 0x4e, 0x06, 0x6e, 0x31, 0xcc, 0x6f, - 0x77, 0x00, 0xe9, 0xbd, 0x14, 0x3e, 0x32, 0x18, 0x86, 0x5d, 0xde, 0x5f, 0x31, 0x89, 0x4f, 0x66, - 0x73, 0xb8, 0xa9, 0xcf, 0xd3, 0xbc, 0x3f, 0x78, 0x01, 0x96, 0x84, 0xec, 0x97, 0x20, 0xd3, 0xd1, - 0xbe, 0xb7, 0x5c, 0xc2, 0xfe, 0x38, 0x4c, 0xb3, 0x9a, 0x87, 0x94, 0x0c, 0xd8, 0x09, 0x69, 0x6a, - 0x46, 0x08, 0x3e, 0xfb, 0xf3, 0x16, 0x4c, 0xae, 0x27, 0x22, 0x93, 0x5d, 0x60, 0xfa, 0xd7, 0x0c, - 0x21, 0x7e, 0x9d, 0x95, 0x62, 0x01, 0x3d, 0x72, 0x21, 0xd7, 0x9f, 0x5b, 0x10, 0xc7, 0xbe, 0x38, - 0x06, 0xf6, 0x6d, 0xc9, 0x60, 0xdf, 0x32, 0x19, 0x59, 0xd5, 0x9d, 0x3c, 0xee, 0x0d, 0x5d, 0x53, - 0x51, 0xa1, 0x0a, 0x78, 0xd8, 0x98, 0x0c, 0x5f, 0x8a, 0x13, 0x66, 0xe8, 0x28, 0x19, 0x27, 0xca, - 0xfe, 0xed, 0x12, 0x20, 0x85, 0xdb, 0x77, 0xd4, 0xaa, 0x74, 0x8d, 0xa3, 0x89, 0x5a, 0xb5, 0x07, - 0x88, 0x59, 0x10, 0x04, 0x8e, 0x17, 0x72, 0xb2, 0xae, 0x10, 0xeb, 0x1d, 0xce, 0x3c, 0x61, 0x56, - 0x34, 0x89, 0xae, 0xa7, 0xa8, 0xe1, 0x8c, 0x16, 0x34, 0xcb, 0x90, 0xc1, 0x7e, 0x2d, 0x43, 0x86, - 0x7a, 0xf8, 0xc1, 0xfd, 0xac, 0x05, 0xe3, 0x6a, 0x98, 0xde, 0x21, 0x56, 0xea, 0xaa, 0x3f, 0x39, - 0x07, 0x68, 0x4d, 0xeb, 0x32, 0xbb, 0x58, 0xbe, 0x9d, 0xf9, 0x33, 0x3a, 0x2d, 0xf7, 0x2d, 0xa2, - 0x62, 0x06, 0xce, 0x09, 0xff, 0x44, 0x51, 0x7a, 0xff, 0x60, 0x6e, 0x5c, 0xfd, 0xe3, 0x31, 0x8a, - 0xe3, 0x2a, 0xf4, 0x48, 0x9e, 0x4c, 0x2c, 0x45, 0xf4, 0x22, 0x0c, 0x76, 0x76, 0x9c, 0x90, 0x24, - 0xbc, 0x79, 0x06, 0x6b, 0xb4, 0xf0, 0xfe, 0xc1, 0xdc, 0x84, 0xaa, 0xc0, 0x4a, 0x30, 0xc7, 0xee, - 0x3f, 0x16, 0x58, 0x7a, 0x71, 0xf6, 0x8c, 0x05, 0xf6, 0x27, 0x16, 0x0c, 0xac, 0xfb, 0xcd, 0xe3, - 0x38, 0x02, 0x5e, 0x35, 0x8e, 0x80, 0xc7, 0xf2, 0xc2, 0xc7, 0xe7, 0xee, 0xfe, 0x95, 0xc4, 0xee, - 0x3f, 0x97, 0x4b, 0xa1, 0x78, 0xe3, 0xb7, 0x61, 0x94, 0x05, 0xa5, 0x17, 0x9e, 0x4b, 0xcf, 0x1b, - 0x1b, 0x7e, 0x2e, 0xb1, 0xe1, 0x27, 0x35, 0x54, 0x6d, 0xa7, 0x3f, 0x05, 0xc3, 0xc2, 0x15, 0x26, - 0xe9, 0x16, 0x2a, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x51, 0x06, 0x23, 0x08, 0x3e, 0xfa, 0x65, 0x0b, - 0xe6, 0x03, 0x6e, 0x22, 0xdb, 0xac, 0x76, 0x03, 0xd7, 0xdb, 0xae, 0x37, 0x76, 0x48, 0xb3, 0xdb, - 0x72, 0xbd, 0xed, 0xd5, 0x6d, 0xcf, 0x57, 0xc5, 0xcb, 0x77, 0x49, 0xa3, 0xcb, 0xd4, 0x6e, 0x3d, - 0x22, 0xee, 0x2b, 0x53, 0xf3, 0xe7, 0xee, 0x1d, 0xcc, 0xcd, 0xe3, 0x43, 0xd1, 0xc6, 0x87, 0xec, - 0x0b, 0xfa, 0x75, 0x0b, 0x2e, 0xf1, 0xd8, 0xf0, 0xfd, 0xf7, 0xbf, 0xe0, 0xb5, 0x5c, 0x93, 0xa4, - 0x62, 0x22, 0x1b, 0x24, 0x68, 0x2f, 0x7e, 0x48, 0x0c, 0xe8, 0xa5, 0xda, 0xe1, 0xda, 0xc2, 0x87, - 0xed, 0x9c, 0xfd, 0xcf, 0xca, 0x30, 0x2e, 0x62, 0x46, 0x89, 0x3b, 0xe0, 0x45, 0x63, 0x49, 0x3c, - 0x9e, 0x58, 0x12, 0xd3, 0x06, 0xf2, 0xd1, 0x1c, 0xff, 0x21, 0x4c, 0xd3, 0xc3, 0xf9, 0x2a, 0x71, - 0x82, 0x68, 0x93, 0x38, 0xdc, 0xe0, 0xab, 0x7c, 0xe8, 0xd3, 0x5f, 0xc9, 0x27, 0xaf, 0x27, 0x89, - 0xe1, 0x34, 0xfd, 0x6f, 0xa5, 0x3b, 0xc7, 0x83, 0xa9, 0x54, 
0xd8, 0xaf, 0x4f, 0x40, 0x45, 0xf9, - 0x71, 0x88, 0x43, 0xa7, 0x38, 0x7a, 0x5e, 0x92, 0x02, 0x17, 0x7f, 0xc5, 0x3e, 0x44, 0x31, 0x39, - 0xfb, 0x1f, 0x94, 0x8c, 0x06, 0xf9, 0x24, 0xae, 0xc3, 0x88, 0x13, 0x86, 0xee, 0xb6, 0x47, 0x9a, - 0x45, 0x12, 0xca, 0x54, 0x33, 0xcc, 0x97, 0x66, 0x41, 0xd4, 0xc4, 0x8a, 0x06, 0xba, 0xca, 0xcd, - 0xea, 0xf6, 0x48, 0x91, 0x78, 0x32, 0x45, 0x0d, 0xa4, 0xe1, 0xdd, 0x1e, 0xc1, 0xa2, 0x3e, 0xfa, - 0x24, 0xb7, 0x7b, 0xbc, 0xe6, 0xf9, 0x77, 0xbc, 0x2b, 0xbe, 0x2f, 0xe3, 0x32, 0xf4, 0x47, 0x70, - 0x5a, 0x5a, 0x3b, 0xaa, 0xea, 0xd8, 0xa4, 0xd6, 0x5f, 0x1c, 0xcd, 0xcf, 0xc2, 0x09, 0x4a, 0xda, - 0x74, 0x9b, 0x0e, 0x11, 0x81, 0x49, 0x11, 0x90, 0x4c, 0x96, 0x89, 0xb1, 0xcb, 0x7c, 0xca, 0x99, - 0xb5, 0x63, 0x41, 0xfa, 0x35, 0x93, 0x04, 0x4e, 0xd2, 0xb4, 0x7f, 0xda, 0x02, 0xe6, 0x42, 0x7a, - 0x0c, 0xfc, 0xc8, 0x47, 0x4c, 0x7e, 0x64, 0x26, 0x6f, 0x90, 0x73, 0x58, 0x91, 0x17, 0xf8, 0xca, - 0xaa, 0x05, 0xfe, 0xdd, 0x7d, 0x61, 0xac, 0xd2, 0xfb, 0xfd, 0x61, 0xff, 0x1f, 0x8b, 0x1f, 0x62, - 0xca, 0xcb, 0x02, 0x7d, 0x27, 0x8c, 0x34, 0x9c, 0x8e, 0xd3, 0xe0, 0x19, 0x5b, 0x72, 0x25, 0x7a, - 0x46, 0xa5, 0xf9, 0x25, 0x51, 0x83, 0x4b, 0xa8, 0x64, 0x60, 0xbb, 0x11, 0x59, 0xdc, 0x53, 0x2a, - 0xa5, 0x9a, 0x9c, 0xdd, 0x85, 0x71, 0x83, 0xd8, 0x43, 0x15, 0x67, 0x7c, 0x27, 0xbf, 0x62, 0x55, - 0x20, 0xc6, 0x36, 0x4c, 0x7b, 0xda, 0x7f, 0x7a, 0xa1, 0xc8, 0xc7, 0xe5, 0x7b, 0x7b, 0x5d, 0xa2, - 0xec, 0xf6, 0xd1, 0xbc, 0x53, 0x13, 0x64, 0x70, 0x9a, 0xb2, 0xfd, 0x93, 0x16, 0x3c, 0xa2, 0x23, - 0x6a, 0x0e, 0x30, 0xbd, 0x94, 0x24, 0x55, 0x18, 0xf1, 0x3b, 0x24, 0x70, 0x22, 0x3f, 0x10, 0xb7, - 0xc6, 0x45, 0x39, 0xe8, 0x37, 0x44, 0xf9, 0x7d, 0x11, 0xef, 0x5c, 0x52, 0x97, 0xe5, 0x58, 0xd5, - 0xa4, 0xaf, 0x4f, 0x36, 0x18, 0xa1, 0x70, 0x75, 0x62, 0x67, 0x00, 0xd3, 0xa4, 0x87, 0x58, 0x40, - 0xec, 0xaf, 0x5b, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0x6f, 0xc2, 0x54, 0xdb, 0x89, 0x1a, 0x3b, 0xcb, - 0x77, 0x3b, 0x01, 0x57, 0x39, 0xc9, 0x71, 0x7a, 0xba, 0xd7, 0x38, 0x69, 0x1f, 0x19, 0x9b, 0x72, - 0xae, 0x25, 0x88, 0xe1, 0x14, 0x79, 0xb4, 0x09, 0xa3, 0xac, 0x8c, 0x79, 0xf1, 0x85, 0x45, 0xac, - 0x41, 0x5e, 0x6b, 0xca, 0x18, 0x61, 0x2d, 0xa6, 0x83, 0x75, 0xa2, 0xf6, 0x97, 0xcb, 0x7c, 0xb7, - 0x33, 0x56, 0xfe, 0x29, 0x18, 0xee, 0xf8, 0xcd, 0xa5, 0xd5, 0x2a, 0x16, 0xb3, 0xa0, 0xae, 0x91, - 0x1a, 0x2f, 0xc6, 0x12, 0x8e, 0x2e, 0xc2, 0x88, 0xf8, 0x29, 0x55, 0x84, 0xec, 0x6c, 0x16, 0x78, - 0x21, 0x56, 0x50, 0xf4, 0x1c, 0x40, 0x27, 0xf0, 0xf7, 0xdc, 0x26, 0x8b, 0x2e, 0x51, 0x36, 0xed, - 0x88, 0x6a, 0x0a, 0x82, 0x35, 0x2c, 0xf4, 0x0a, 0x8c, 0x77, 0xbd, 0x90, 0xb3, 0x23, 0x5a, 0x2c, - 0x59, 0x65, 0xe1, 0x72, 0x53, 0x07, 0x62, 0x13, 0x17, 0x2d, 0xc0, 0x50, 0xe4, 0x30, 0xbb, 0x98, - 0xc1, 0x7c, 0x73, 0xdf, 0x0d, 0x8a, 0xa1, 0x27, 0x07, 0xa1, 0x15, 0xb0, 0xa8, 0x88, 0x3e, 0x21, - 0x1d, 0x6a, 0xf9, 0xc1, 0x2e, 0xec, 0xec, 0xfb, 0xbb, 0x04, 0x34, 0x77, 0x5a, 0x61, 0xbf, 0x6f, - 0xd0, 0x42, 0x2f, 0x03, 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0xa5, 0xac, 0xd9, 0x14, 0x5f, 0x50, - 0xf5, 0xd7, 0xfd, 0xe8, 0x66, 0x48, 0x96, 0x15, 0x06, 0xd6, 0xb0, 0xed, 0x5f, 0xaf, 0x00, 0xc4, - 0x7c, 0x3b, 0x7a, 0x2b, 0x75, 0x70, 0x3d, 0x53, 0xcc, 0xe9, 0x1f, 0xdd, 0xa9, 0x85, 0xbe, 0xcf, - 0x82, 0x51, 0xa7, 0xd5, 0xf2, 0x1b, 0x0e, 0x8f, 0xf6, 0x5b, 0x2a, 0x3e, 0x38, 0x45, 0xfb, 0x0b, - 0x71, 0x0d, 0xde, 0x85, 0xe7, 0xe5, 0x0a, 0xd5, 0x20, 0x3d, 0x7b, 0xa1, 0x37, 0x8c, 0x3e, 0x20, - 0x9f, 0x8a, 0x65, 0x63, 0x28, 0xd5, 0x53, 0xb1, 0xc2, 0xee, 0x08, 0xfd, 0x95, 0x78, 0xd3, 0x78, - 0x25, 0x0e, 0xe4, 0x7b, 0x0c, 0x1a, 0xec, 0x6b, 0xaf, 0x07, 0x22, 0xaa, 0xe9, 0xd1, 
-	(remaining bytes of the old gzipped FileDescriptorProto elided)
+	proto.RegisterFile("k8s.io/api/core/v1/generated.proto", fileDescriptor_6c07b07c062484ab)
+}
+
+var fileDescriptor_6c07b07c062484ab = []byte{
+	// 16056 bytes of a gzipped FileDescriptorProto
0x4b, 0x23, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x1c, 0x26, 0x8a, 0x65, 0x97, 0x81, + 0xb6, 0xbc, 0x66, 0x59, 0x29, 0x16, 0x50, 0xf4, 0x3a, 0xf4, 0x07, 0xa4, 0xce, 0xd4, 0xbd, 0x23, + 0xdd, 0x2f, 0x72, 0xae, 0x3d, 0xe6, 0xf5, 0xb0, 0x24, 0x80, 0xae, 0x00, 0x0a, 0x08, 0x65, 0xf0, + 0x5c, 0x6f, 0x43, 0x19, 0xe1, 0x8b, 0x83, 0x56, 0x31, 0xd2, 0x38, 0xc6, 0x90, 0xde, 0xac, 0x38, + 0xa3, 0x1a, 0xba, 0x04, 0x13, 0xaa, 0x74, 0xd9, 0x0b, 0x23, 0x87, 0x1e, 0x70, 0x63, 0x8c, 0x96, + 0x92, 0xaf, 0xe0, 0x24, 0x02, 0x4e, 0xd7, 0xb1, 0x7f, 0xde, 0x02, 0x3e, 0xce, 0x87, 0x20, 0x55, + 0x78, 0xd5, 0x94, 0x2a, 0x9c, 0xc8, 0x9d, 0xb9, 0x1c, 0x89, 0xc2, 0xcf, 0x5b, 0x30, 0xa4, 0xcd, + 0x6c, 0xbc, 0x66, 0xad, 0x36, 0x6b, 0xb6, 0x05, 0xe3, 0x74, 0xa5, 0x5f, 0xbb, 0x15, 0x92, 0x60, + 0x9b, 0xd4, 0xd8, 0xc2, 0x2c, 0xdc, 0xdb, 0xc2, 0x54, 0x06, 0xbf, 0x57, 0x13, 0x04, 0x71, 0xaa, + 0x09, 0xfb, 0x53, 0xb2, 0xab, 0xca, 0x3e, 0xba, 0xaa, 0xe6, 0x3c, 0x61, 0x1f, 0xad, 0x66, 0x15, + 0xc7, 0x38, 0x74, 0xab, 0x6d, 0xfa, 0x61, 0x94, 0xb4, 0x8f, 0xbe, 0xec, 0x87, 0x11, 0x66, 0x10, + 0xfb, 0x39, 0x80, 0xc5, 0x3b, 0xa4, 0xca, 0x57, 0xac, 0xfe, 0xe8, 0xb1, 0xf2, 0x1f, 0x3d, 0xf6, + 0x1f, 0x58, 0x30, 0xba, 0x34, 0x6f, 0xdc, 0x5c, 0x33, 0x00, 0xfc, 0xa5, 0x76, 0xf3, 0xe6, 0xaa, + 0x34, 0xd2, 0xe1, 0x76, 0x0a, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x04, 0x14, 0xeb, 0x2d, 0x4f, 0x88, + 0x3d, 0xfb, 0xe9, 0xf5, 0x78, 0xb5, 0xe5, 0x61, 0x5a, 0xa6, 0x79, 0xb2, 0x15, 0xbb, 0xf6, 0x64, + 0xeb, 0x18, 0x50, 0x07, 0x95, 0xa0, 0xf7, 0xf6, 0x6d, 0xb7, 0xc6, 0xe3, 0x04, 0x08, 0x03, 0xa2, + 0x9b, 0x37, 0x97, 0x17, 0x42, 0xcc, 0xcb, 0xed, 0x2f, 0x16, 0x61, 0x7a, 0xa9, 0x4e, 0xee, 0xbc, + 0xc3, 0x58, 0x09, 0xdd, 0xfa, 0xe1, 0x1d, 0x4c, 0x80, 0x74, 0x50, 0x5f, 0xcb, 0xce, 0xe3, 0xb1, + 0x0e, 0xfd, 0xdc, 0x3c, 0x58, 0x46, 0x4e, 0xc8, 0x54, 0xca, 0xe6, 0x0f, 0xc8, 0x0c, 0x37, 0x33, + 0x16, 0x4a, 0x59, 0x75, 0x61, 0x8a, 0x52, 0x2c, 0x89, 0x4f, 0xbf, 0x04, 0xc3, 0x3a, 0xe6, 0x81, + 0xbc, 0x9e, 0xbf, 0xb7, 0x08, 0xe3, 0xb4, 0x07, 0x0f, 0x74, 0x22, 0xae, 0xa7, 0x27, 0xe2, 0x7e, + 0x7b, 0xbe, 0x76, 0x9e, 0x8d, 0xb7, 0x92, 0xb3, 0x71, 0x21, 0x6f, 0x36, 0x0e, 0x7b, 0x0e, 0xbe, + 0xcf, 0x82, 0xc9, 0xa5, 0xba, 0x5f, 0xdd, 0x4a, 0x78, 0xa7, 0xbe, 0x00, 0x43, 0xf4, 0x38, 0x0e, + 0x8d, 0x40, 0x2d, 0x46, 0xe8, 0x1e, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xfa, 0xf5, 0xe5, 0x85, + 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xf7, 0x2c, 0x38, 0x75, 0x69, 0x7e, 0x31, 0x5e, + 0x8a, 0xa9, 0xa0, 0x43, 0x67, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58, 0x2f, + 0x04, 0xf4, 0xdd, 0x12, 0xdf, 0xeb, 0x3a, 0xc0, 0x25, 0x5c, 0x9e, 0x17, 0xe7, 0xae, 0xd4, 0x02, + 0x59, 0xb9, 0x5a, 0xa0, 0xc7, 0xa0, 0x9f, 0xde, 0x0b, 0x6e, 0x55, 0xf6, 0x9b, 0x9b, 0x5d, 0xf0, + 0x22, 0x2c, 0x61, 0xf6, 0xcf, 0x59, 0x30, 0x79, 0xc9, 0x8d, 0xe8, 0xa5, 0x9d, 0x8c, 0xaa, 0x43, + 0x6f, 0xed, 0xd0, 0x8d, 0xfc, 0x60, 0x27, 0x19, 0x55, 0x07, 0x2b, 0x08, 0xd6, 0xb0, 0xf8, 0x07, + 0x6d, 0xbb, 0xcc, 0xdf, 0xa5, 0x60, 0xea, 0xdd, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x8e, 0x57, 0xcd, + 0x0d, 0x98, 0xc8, 0x72, 0x47, 0x1c, 0xdc, 0x6a, 0xbc, 0x16, 0x24, 0x00, 0xc7, 0x38, 0xf6, 0x9f, + 0x5b, 0x50, 0xba, 0xc4, 0xbd, 0x76, 0xd7, 0xc3, 0x9c, 0x43, 0xf7, 0x39, 0x18, 0x24, 0x52, 0x41, + 0x20, 0x7a, 0xad, 0x18, 0x51, 0xa5, 0x39, 0xe0, 0xc1, 0x7d, 0x14, 0x5e, 0x17, 0x2e, 0xf4, 0x07, + 0xf3, 0x81, 0x5e, 0x02, 0x44, 0xf4, 0xb6, 0xf4, 0x68, 0x47, 0x2c, 0x6c, 0xca, 0x62, 0x0a, 0x8a, + 0x33, 0x6a, 0xd8, 0x3f, 0x61, 0xc1, 0x51, 0xf5, 0xc1, 0xef, 0xba, 0xcf, 0xb4, 0xbf, 0x5a, 0x80, + 0x91, 0xcb, 0x6b, 0x6b, 0xe5, 0x4b, 
0x24, 0xd2, 0x56, 0x65, 0x7b, 0xb5, 0x3f, 0xd6, 0xb4, 0x97, + 0xed, 0xde, 0x88, 0xad, 0xc8, 0xad, 0xcf, 0xf0, 0x18, 0x7e, 0x33, 0xcb, 0x5e, 0x74, 0x2d, 0xa8, + 0x44, 0x81, 0xeb, 0x6d, 0x64, 0xae, 0x74, 0xc9, 0xb3, 0x14, 0xf3, 0x78, 0x16, 0xf4, 0x1c, 0xf4, + 0xb1, 0x20, 0x82, 0x72, 0x12, 0x1e, 0x52, 0x4f, 0x2c, 0x56, 0xba, 0xbf, 0x5b, 0x1a, 0xbc, 0x8e, + 0x97, 0xf9, 0x1f, 0x2c, 0x50, 0xd1, 0x75, 0x18, 0xda, 0x8c, 0xa2, 0xe6, 0x65, 0xe2, 0xd4, 0x48, + 0x20, 0x4f, 0xd9, 0xd3, 0x59, 0xa7, 0x2c, 0x1d, 0x04, 0x8e, 0x16, 0x1f, 0x4c, 0x71, 0x59, 0x88, + 0x75, 0x3a, 0x76, 0x05, 0x20, 0x86, 0xdd, 0x27, 0xc5, 0x8d, 0xbd, 0x06, 0x83, 0xf4, 0x73, 0x67, + 0xeb, 0xae, 0xd3, 0x5e, 0x35, 0xfe, 0x14, 0x0c, 0x4a, 0xc5, 0x77, 0x28, 0x42, 0x7c, 0xb0, 0x1b, + 0x49, 0xea, 0xc5, 0x43, 0x1c, 0xc3, 0xed, 0x47, 0x41, 0x58, 0x00, 0xb7, 0x23, 0x69, 0xaf, 0xc3, + 0x11, 0x66, 0xca, 0xec, 0x44, 0x9b, 0xc6, 0x1a, 0xed, 0xbc, 0x18, 0x9e, 0x16, 0xef, 0x3a, 0xfe, + 0x65, 0x53, 0x9a, 0x0b, 0xf9, 0xb0, 0xa4, 0x18, 0xbf, 0xf1, 0xec, 0x3f, 0xeb, 0x81, 0x87, 0x96, + 0x2b, 0xf9, 0xb1, 0xa9, 0x2e, 0xc2, 0x30, 0x67, 0x17, 0xe9, 0xd2, 0x70, 0xea, 0xa2, 0x5d, 0x25, + 0x01, 0x5d, 0xd3, 0x60, 0xd8, 0xc0, 0x44, 0xa7, 0xa0, 0xe8, 0xbe, 0xed, 0x25, 0x1d, 0x2c, 0x97, + 0xdf, 0x58, 0xc5, 0xb4, 0x9c, 0x82, 0x29, 0xe7, 0xc9, 0x8f, 0x74, 0x05, 0x56, 0xdc, 0xe7, 0xab, + 0x30, 0xea, 0x86, 0xd5, 0xd0, 0x5d, 0xf6, 0xe8, 0x3e, 0xd5, 0x76, 0xba, 0x92, 0x39, 0xd0, 0x4e, + 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0xf7, 0x4b, 0x6f, 0xd7, 0xdc, 0x6b, 0xc7, 0xc8, 0x18, 0xf4, 0xf8, + 0x6f, 0xb2, 0xaf, 0x0b, 0x99, 0x08, 0x5e, 0x1c, 0xff, 0xfc, 0x83, 0x43, 0x2c, 0x61, 0xf4, 0x41, + 0x57, 0xdd, 0x74, 0x9a, 0xb3, 0xad, 0x68, 0x73, 0xc1, 0x0d, 0xab, 0xfe, 0x36, 0x09, 0x76, 0xd8, + 0x5b, 0x7c, 0x20, 0x7e, 0xd0, 0x29, 0xc0, 0xfc, 0xe5, 0xd9, 0x32, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, + 0x16, 0xc6, 0x64, 0x61, 0x85, 0x84, 0xec, 0x0a, 0x18, 0x62, 0x64, 0x94, 0xcb, 0xa3, 0x28, 0x56, + 0x44, 0x92, 0xf8, 0x26, 0x83, 0x0b, 0xf7, 0x83, 0xc1, 0x7d, 0x11, 0x46, 0x5c, 0xcf, 0x8d, 0x5c, + 0x27, 0xf2, 0xb9, 0xfe, 0x88, 0x3f, 0xbb, 0x99, 0x80, 0x79, 0x59, 0x07, 0x60, 0x13, 0xcf, 0xfe, + 0x6f, 0x3d, 0x30, 0xc1, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0x5e, 0x5a, 0x61, 0xd7, 0xd3, 0x2b, 0xec, + 0x7e, 0x70, 0xee, 0xf7, 0xbc, 0xcc, 0x3e, 0x67, 0xc1, 0x04, 0x93, 0x71, 0x1b, 0xcb, 0xec, 0x3c, + 0x0c, 0x06, 0x86, 0x37, 0xea, 0xa0, 0xae, 0xd4, 0x92, 0x8e, 0xa5, 0x31, 0x0e, 0x7a, 0x0d, 0xa0, + 0x19, 0xcb, 0xd0, 0x0b, 0x46, 0x08, 0x51, 0xc8, 0x15, 0x9f, 0x6b, 0x75, 0xec, 0x4f, 0xc3, 0xa0, + 0x72, 0x37, 0x95, 0xfe, 0xe6, 0x56, 0x8e, 0xbf, 0x79, 0x67, 0x36, 0x42, 0xda, 0xc6, 0x15, 0x33, + 0x6d, 0xe3, 0xbe, 0x6c, 0x41, 0xac, 0xe1, 0x40, 0x6f, 0xc0, 0x60, 0xd3, 0x67, 0x06, 0xd1, 0x81, + 0xf4, 0x32, 0x78, 0xb4, 0xad, 0x8a, 0x84, 0xc7, 0x09, 0x0c, 0xf8, 0x74, 0x94, 0x65, 0x55, 0x1c, + 0x53, 0x41, 0x57, 0xa0, 0xbf, 0x19, 0x90, 0x4a, 0xc4, 0x82, 0x58, 0x75, 0x4f, 0x90, 0x2f, 0x5f, + 0x5e, 0x11, 0x4b, 0x0a, 0xf6, 0x2f, 0x16, 0x60, 0x3c, 0x89, 0x8a, 0x5e, 0x81, 0x1e, 0x72, 0x87, + 0x54, 0x45, 0x7f, 0x33, 0x79, 0x82, 0x58, 0x46, 0xc2, 0x07, 0x80, 0xfe, 0xc7, 0xac, 0x16, 0xba, + 0x0c, 0xfd, 0x94, 0x21, 0xb8, 0xa4, 0x02, 0x36, 0x3e, 0x9c, 0xc7, 0x54, 0x28, 0xce, 0x8a, 0x77, + 0x4e, 0x14, 0x61, 0x59, 0x9d, 0x19, 0xa4, 0x55, 0x9b, 0x15, 0xfa, 0xd6, 0x8a, 0xda, 0x89, 0x04, + 0xd6, 0xe6, 0xcb, 0x1c, 0x49, 0x50, 0xe3, 0x06, 0x69, 0xb2, 0x10, 0xc7, 0x44, 0xd0, 0x6b, 0xd0, + 0x1b, 0xd6, 0x09, 0x69, 0x0a, 0x8b, 0x83, 0x4c, 0x29, 0x67, 0x85, 0x22, 0x08, 0x4a, 0x4c, 0x2a, + 0xc2, 0x0a, 0x30, 0xaf, 0x68, 0xff, 0x92, 0x05, 0xc0, 0x2d, 
0xf8, 0x1c, 0x6f, 0x83, 0x1c, 0x82, + 0x62, 0x60, 0x01, 0x7a, 0xc2, 0x26, 0xa9, 0xb6, 0xb3, 0xf6, 0x8f, 0xfb, 0x53, 0x69, 0x92, 0x6a, + 0xbc, 0x66, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, 0xdf, 0x0f, 0x30, 0x1a, 0xa3, 0x2d, 0x47, 0xa4, 0x81, + 0x9e, 0x31, 0xa2, 0xdc, 0x9c, 0x48, 0x44, 0xb9, 0x19, 0x64, 0xd8, 0x9a, 0x0c, 0xfa, 0xd3, 0x50, + 0x6c, 0x38, 0x77, 0x84, 0x90, 0xf1, 0xa9, 0xf6, 0xdd, 0xa0, 0xf4, 0x67, 0x56, 0x9c, 0x3b, 0xfc, + 0x1d, 0xfe, 0x94, 0xdc, 0x63, 0x2b, 0xce, 0x9d, 0x8e, 0x16, 0xe9, 0xb4, 0x11, 0xd6, 0x96, 0xeb, + 0x09, 0xe3, 0xb4, 0xae, 0xda, 0x72, 0xbd, 0x64, 0x5b, 0xae, 0xd7, 0x45, 0x5b, 0xae, 0x87, 0xee, + 0x42, 0xbf, 0xb0, 0x1d, 0x15, 0xe1, 0xf7, 0xce, 0x77, 0xd1, 0x9e, 0x30, 0x3d, 0xe5, 0x6d, 0x9e, + 0x97, 0x72, 0x06, 0x51, 0xda, 0xb1, 0x5d, 0xd9, 0x20, 0xfa, 0x3b, 0x16, 0x8c, 0x8a, 0xdf, 0x98, + 0xbc, 0xdd, 0x22, 0x61, 0x24, 0xf8, 0xf0, 0x0f, 0x75, 0xdf, 0x07, 0x51, 0x91, 0x77, 0xe5, 0x43, + 0xf2, 0xca, 0x34, 0x81, 0x1d, 0x7b, 0x94, 0xe8, 0x05, 0xfa, 0x45, 0x0b, 0x8e, 0x34, 0x9c, 0x3b, + 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x0d, 0xc6, 0x2b, 0xdd, 0x4d, 0x7f, 0xaa, 0x3a, + 0xef, 0xa4, 0x54, 0xb8, 0x1e, 0xc9, 0x42, 0xe9, 0xd8, 0xd5, 0xcc, 0x7e, 0x4d, 0xaf, 0xc3, 0x80, + 0x5c, 0x6f, 0x0f, 0xd2, 0x30, 0x9e, 0xb5, 0x23, 0xd6, 0xda, 0x03, 0x6d, 0xe7, 0xd3, 0x30, 0xac, + 0xaf, 0xb1, 0x07, 0xda, 0xd6, 0xdb, 0x30, 0x99, 0xb1, 0x96, 0x1e, 0x68, 0x93, 0xb7, 0xe1, 0x44, + 0xee, 0xfa, 0x78, 0xa0, 0x8e, 0x0d, 0x5f, 0xb5, 0xf4, 0x73, 0xf0, 0x10, 0xb4, 0x33, 0xf3, 0xa6, + 0x76, 0xe6, 0x74, 0xfb, 0x9d, 0x93, 0xa3, 0xa2, 0x79, 0x4b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0xeb, + 0xd0, 0x57, 0xa7, 0x25, 0xd2, 0x02, 0xd9, 0xee, 0xbc, 0x23, 0x63, 0xbe, 0x98, 0x95, 0x87, 0x58, + 0x50, 0xb0, 0xbf, 0x64, 0x41, 0x86, 0x6b, 0x06, 0xe5, 0x93, 0x5a, 0x6e, 0x8d, 0x0d, 0x49, 0x31, + 0xe6, 0x93, 0x54, 0x10, 0x98, 0x53, 0x50, 0xdc, 0x70, 0x6b, 0xc2, 0xb3, 0x58, 0x81, 0x2f, 0x51, + 0xf0, 0x86, 0x5b, 0x43, 0x4b, 0x80, 0xc2, 0x56, 0xb3, 0x59, 0x67, 0x66, 0x4b, 0x4e, 0xfd, 0x52, + 0xe0, 0xb7, 0x9a, 0xdc, 0xdc, 0xb8, 0xc8, 0x85, 0x44, 0x95, 0x14, 0x14, 0x67, 0xd4, 0xb0, 0x7f, + 0xd5, 0x82, 0x9e, 0x43, 0x98, 0x26, 0x6c, 0x4e, 0xd3, 0x33, 0xb9, 0xa4, 0x45, 0xd6, 0x86, 0x19, + 0xec, 0xdc, 0x5e, 0xbc, 0x13, 0x11, 0x2f, 0x64, 0x0c, 0x47, 0xe6, 0xac, 0xed, 0x5a, 0x30, 0x79, + 0xd5, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x24, 0x58, 0xf6, 0x36, 0x0e, 0x64, 0xdb, 0x5f, + 0xe8, 0x68, 0xdb, 0x7f, 0x11, 0xfa, 0xdc, 0xa6, 0x16, 0xf6, 0xfd, 0x0c, 0x9d, 0xdd, 0xe5, 0xb2, + 0x88, 0xf8, 0x8e, 0x8c, 0xc6, 0x59, 0x29, 0x16, 0xf8, 0x74, 0x59, 0x72, 0xa3, 0xba, 0x9e, 0xfc, + 0x65, 0x49, 0xdf, 0x3a, 0xc9, 0x70, 0x66, 0x86, 0xf9, 0xf7, 0x26, 0x18, 0x4d, 0x08, 0x0f, 0x46, + 0x0c, 0xfd, 0x2e, 0xff, 0x52, 0xb1, 0x36, 0x1f, 0xcf, 0x7e, 0x83, 0xa4, 0x06, 0x46, 0xf3, 0xcd, + 0xe3, 0x05, 0x58, 0x12, 0xb2, 0x2f, 0x42, 0x66, 0xf8, 0x99, 0xce, 0xf2, 0x25, 0xfb, 0x63, 0x30, + 0xc1, 0x6a, 0x1e, 0x50, 0x76, 0x63, 0x27, 0xa4, 0xe2, 0x19, 0x11, 0x7c, 0xed, 0xff, 0x6c, 0x01, + 0x5a, 0xf1, 0x6b, 0xee, 0xfa, 0x8e, 0x20, 0xce, 0xbf, 0xff, 0x6d, 0x28, 0xf1, 0xc7, 0x71, 0x32, + 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xb8, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, 0x1d, + 0x77, 0xa2, 0x87, 0xde, 0x48, 0x04, 0x1d, 0xfc, 0x70, 0x2a, 0xe8, 0xe0, 0xe3, 0x99, 0x76, 0x31, + 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0xe7, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcb, 0x8c, + 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0xbe, 0x4b, 0x62, 0xff, 0xc6, 0x82, 0x38, + 0xdc, 0xd5, 0x21, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 
0xe3, 0xb8, + 0xd1, 0x15, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, 0xb3, + 0xf1, 0xf5, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0xfb, 0x13, 0xa8, 0x72, 0x1b, + 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x98, 0x0d, 0xcd, 0xb4, + 0xf4, 0x5c, 0xbd, 0x9a, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, 0xea, + 0xeb, 0xe0, 0x82, 0xfd, 0x15, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4b, 0x5c, 0x40, 0x54, 0x7f, 0x72, + 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x07, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x5d, 0xa2, + 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0xdd, 0xdf, 0x2d, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, 0x6b, + 0x5c, 0xc5, 0xfe, 0x69, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x80, 0xde, 0xe6, 0xa6, 0x13, 0x92, + 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0xee, 0xef, 0x96, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, 0x76, + 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0xbf, 0xb4, 0xa0, 0x67, 0x95, 0xde, 0x5e, + 0x0f, 0xfe, 0x08, 0x78, 0xd5, 0x38, 0x02, 0x4e, 0xe6, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, 0x89, + 0xdd, 0x7f, 0x3a, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, 0x3e, + 0x67, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x02, 0xfa, 0x85, 0x9f, + 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x64, 0x11, 0x8c, 0xfc, 0x48, 0xe8, 0xd7, + 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x46, 0xa5, 0xba, 0x49, 0x6a, + 0xad, 0xba, 0xeb, 0x6d, 0x2c, 0x6f, 0x78, 0xbe, 0x2a, 0x5e, 0xbc, 0x43, 0xaa, 0x2d, 0xa6, 0x1b, + 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbb, 0xb7, 0x5b, 0x9a, 0xc1, 0x07, 0xa2, 0x8d, 0x0f, + 0xd8, 0x17, 0xf4, 0x7b, 0x16, 0x9c, 0xe7, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, 0x59, + 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xa2, 0x18, 0xd0, 0xf3, 0xe5, 0x83, 0xb5, 0x85, + 0x0f, 0xda, 0x39, 0xfb, 0x5f, 0x14, 0x61, 0x44, 0x84, 0x89, 0x14, 0x77, 0xc0, 0x0b, 0xc6, 0x92, + 0x78, 0x38, 0xb1, 0x24, 0x26, 0x0c, 0xe4, 0xfb, 0x73, 0xfc, 0x87, 0x30, 0x41, 0x0f, 0xe7, 0xcb, + 0xc4, 0x09, 0xa2, 0x5b, 0xc4, 0xe1, 0x56, 0x89, 0xc5, 0x03, 0x9f, 0xfe, 0x4a, 0x3c, 0x7e, 0x35, + 0x49, 0x0c, 0xa7, 0xe9, 0xbf, 0x97, 0xee, 0x1c, 0x0f, 0xc6, 0x53, 0x91, 0x3e, 0xdf, 0x84, 0x41, + 0xe5, 0x24, 0x25, 0x0e, 0x9d, 0xf6, 0x01, 0x73, 0x93, 0x14, 0xb8, 0xd0, 0x33, 0x76, 0xd0, 0x8b, + 0xc9, 0xd9, 0xff, 0xa4, 0x60, 0x34, 0xc8, 0x27, 0x71, 0x15, 0x06, 0x9c, 0x90, 0x05, 0xf1, 0xae, + 0xb5, 0x93, 0x4b, 0xa7, 0x9a, 0x61, 0x8e, 0x6a, 0xb3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x65, 0x6e, + 0xfb, 0xb9, 0x4d, 0xda, 0x09, 0xa5, 0x53, 0xd4, 0x40, 0x5a, 0x87, 0x6e, 0x13, 0x2c, 0xea, 0xa3, + 0x4f, 0x70, 0xe3, 0xdc, 0x2b, 0x9e, 0x7f, 0xdb, 0xbb, 0xe4, 0xfb, 0x32, 0x24, 0x50, 0x77, 0x04, + 0x27, 0xa4, 0x49, 0xae, 0xaa, 0x8e, 0x4d, 0x6a, 0xdd, 0x85, 0xce, 0xfe, 0x0c, 0xb0, 0xbc, 0x24, + 0x66, 0x4c, 0x82, 0x10, 0x11, 0x18, 0x13, 0x31, 0x48, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xe7, 0xb7, + 0x59, 0x3b, 0xd6, 0xe3, 0x5c, 0x31, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x93, 0x1f, 0xc2, 0x4b, 0xc4, + 0x89, 0x5a, 0x01, 0x09, 0xd1, 0x47, 0x61, 0x2a, 0xfd, 0x32, 0x16, 0xea, 0x10, 0x8b, 0x71, 0xcf, + 0x27, 0xf7, 0x76, 0x4b, 0x53, 0x95, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xfe, 0x59, 0x0b, 0x98, 0x27, + 0xf8, 0x21, 0x70, 0x3e, 0x1f, 0x31, 0x39, 0x9f, 0xa9, 0xbc, 0xe9, 0xcc, 0x61, 0x7a, 0x9e, 0xe7, + 0x6b, 0xb8, 0x1c, 0xf8, 0x77, 0x76, 0x84, 0xed, 0x56, 0xe7, 0x67, 0x9c, 0xfd, 0x45, 0x0b, 0x58, + 0x12, 0x1f, 0xcc, 0x5f, 0xed, 0x52, 0xc1, 0xd1, 0xd9, 0x2c, 0xe1, 0xa3, 0x30, 0xb0, 0x2e, 0x86, + 0x3f, 0x43, 
0xe8, 0x64, 0x74, 0xd8, 0xa4, 0x2d, 0x27, 0x4d, 0x78, 0x74, 0x8a, 0x7f, 0x58, 0x51, + 0xb3, 0xff, 0xb1, 0x05, 0xd3, 0xf9, 0xd5, 0xd0, 0x75, 0x38, 0x1e, 0x90, 0x6a, 0x2b, 0x08, 0xe9, + 0x96, 0x10, 0x0f, 0x20, 0xe1, 0x14, 0xc5, 0xa7, 0xfa, 0xa1, 0xbd, 0xdd, 0xd2, 0x71, 0x9c, 0x8d, + 0x82, 0xf3, 0xea, 0xa2, 0x97, 0x60, 0xb4, 0x15, 0x72, 0xce, 0x8f, 0x31, 0x5d, 0xa1, 0x88, 0x14, + 0xcd, 0xfc, 0x86, 0xae, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0xbf, 0x8b, 0x2f, 0x47, 0x15, 0x2c, 0xba, + 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x68, 0xa7, 0x5b, 0x9f, 0x5d, 0x97, + 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xca, 0x82, 0xe3, 0x3a, 0xa2, 0xe6, 0x0e, + 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0x3b, 0x27, + 0x57, 0xe8, 0x35, 0x51, 0xbe, 0x2f, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, 0x86, + 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, 0xfe, + 0x33, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x6d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x73, 0xf1, 0x4e, + 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0x4f, 0x75, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, 0x95, + 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc1, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, 0x26, + 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xcb, 0x45, 0x7e, 0x68, 0xb0, + 0xb7, 0xc7, 0x13, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, 0xca, + 0xbc, 0x18, 0x4b, 0x38, 0x3a, 0x07, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, 0x42, + 0xac, 0xa0, 0xe8, 0x59, 0x80, 0x66, 0xe0, 0x6f, 0xbb, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, 0xe7, + 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x65, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, 0xf7, + 0xca, 0x6e, 0xec, 0xba, 0x0e, 0xc4, 0x26, 0x2e, 0x9a, 0x85, 0xbe, 0xc8, 0x61, 0xd6, 0x66, 0xbd, + 0xf9, 0x46, 0xf4, 0x6b, 0x14, 0x43, 0xcf, 0x2c, 0x47, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x29, 0xdd, + 0xeb, 0xf9, 0x4d, 0x24, 0xbc, 0x57, 0xba, 0xbb, 0xb5, 0x34, 0xe7, 0x7a, 0xe1, 0x15, 0x63, 0xd0, + 0x42, 0x2f, 0x01, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xae, 0x6c, 0x44, 0x15, 0x23, 0xb3, 0xe0, + 0xaf, 0xfa, 0xd1, 0xf5, 0x90, 0x2c, 0x2a, 0x0c, 0xac, 0x61, 0xdb, 0xdf, 0x3f, 0x04, 0x10, 0x3f, + 0x34, 0xd0, 0x5d, 0x18, 0xa8, 0x3a, 0x4d, 0xa7, 0xca, 0xd3, 0xa6, 0x16, 0xf3, 0xbc, 0x9e, 0xe3, + 0x1a, 0x33, 0xf3, 0x02, 0x9d, 0x2b, 0x6f, 0x64, 0xc8, 0xf0, 0x01, 0x59, 0xdc, 0x51, 0x61, 0xa3, + 0xda, 0x43, 0x9f, 0xb3, 0x60, 0x48, 0x44, 0x3a, 0x62, 0x33, 0x54, 0xc8, 0xd7, 0xb7, 0x69, 0xed, + 0xcf, 0xc6, 0x35, 0x78, 0x17, 0x9e, 0x93, 0x2b, 0x54, 0x83, 0x74, 0xec, 0x85, 0xde, 0x30, 0xfa, + 0xa0, 0x7c, 0xdb, 0x16, 0x8d, 0xa1, 0x54, 0x6f, 0xdb, 0x41, 0x76, 0xd5, 0xe8, 0xcf, 0xda, 0xeb, + 0xc6, 0xb3, 0xb6, 0x27, 0xdf, 0x7f, 0xd8, 0xe0, 0xb7, 0x3b, 0xbd, 0x68, 0x51, 0x59, 0x8f, 0x25, + 0xd2, 0x9b, 0xef, 0xf4, 0xaa, 0x3d, 0xec, 0x3a, 0xc4, 0x11, 0xf9, 0x34, 0x8c, 0xd5, 0x4c, 0xae, + 0x45, 0xac, 0xc4, 0xc7, 0xf3, 0xe8, 0x26, 0x98, 0x9c, 0x98, 0x4f, 0x49, 0x00, 0x70, 0x92, 0x30, + 0x2a, 0xf3, 0xd0, 0x32, 0xcb, 0xde, 0xba, 0x2f, 0x3c, 0xa8, 0xec, 0xdc, 0xb9, 0xdc, 0x09, 0x23, + 0xd2, 0xa0, 0x98, 0x31, 0x93, 0xb0, 0x2a, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x7d, 0xcc, 0xeb, + 0x31, 0x9c, 0x1a, 0xc8, 0x57, 0x6b, 0x98, 0x91, 0x50, 0xe3, 0x0d, 0xc9, 0xfe, 0x86, 0x58, 0x50, + 0x40, 0x97, 0xa5, 0x4f, 0x71, 0xb8, 0xec, 0x5d, 0x0f, 0x09, 0xf3, 0x29, 0x1e, 0x9c, 0x7b, 0x34, + 0x76, 0x17, 0xe6, 0xe5, 0x99, 0xf9, 0x67, 0x8d, 0x9a, 0x94, 0xed, 0x13, 0xff, 0x65, 0x5a, 0x5b, + 0x11, 0xb7, 0x2d, 0xb3, 0x7b, 0x66, 
0xea, 0xdb, 0x78, 0x38, 0x6f, 0x98, 0x24, 0x70, 0x92, 0x26, + 0x65, 0xa1, 0xf9, 0xae, 0x17, 0x3e, 0x58, 0x9d, 0xce, 0x0e, 0x2e, 0x39, 0x60, 0xb7, 0x11, 0x2f, + 0xc1, 0xa2, 0x3e, 0x72, 0x61, 0x2c, 0x30, 0xd8, 0x0b, 0x19, 0x6e, 0xed, 0x6c, 0x77, 0x4c, 0x8c, + 0x16, 0xc8, 0xdf, 0x24, 0x83, 0x93, 0x74, 0xd1, 0xeb, 0x1a, 0xa3, 0x34, 0xd2, 0xfe, 0xe5, 0xdf, + 0x89, 0x35, 0x9a, 0xde, 0x82, 0x11, 0xe3, 0xb0, 0x79, 0xa0, 0x2a, 0x48, 0x0f, 0xc6, 0x93, 0x27, + 0xcb, 0x03, 0xd5, 0x3c, 0xfe, 0x49, 0x0f, 0x8c, 0x9a, 0x3b, 0x01, 0x9d, 0x87, 0x41, 0x41, 0x44, + 0x65, 0xb4, 0x52, 0x9b, 0x7b, 0x45, 0x02, 0x70, 0x8c, 0xc3, 0x12, 0x99, 0xb1, 0xea, 0x9a, 0xaf, + 0x40, 0x9c, 0xc8, 0x4c, 0x41, 0xb0, 0x86, 0x45, 0x1f, 0xb0, 0xb7, 0x7c, 0x3f, 0x52, 0xf7, 0xa8, + 0xda, 0x2e, 0x73, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x3f, 0xb7, 0x48, 0xe0, 0x91, 0xba, 0x99, 0xd2, + 0x41, 0xdd, 0x9f, 0x57, 0x74, 0x20, 0x36, 0x71, 0x29, 0x17, 0xe0, 0x87, 0x6c, 0xff, 0x89, 0x67, + 0x72, 0xec, 0x7b, 0x51, 0xe1, 0x51, 0x24, 0x24, 0x1c, 0x7d, 0x0c, 0x8e, 0xab, 0xf0, 0x89, 0x62, + 0x75, 0xc9, 0x16, 0xfb, 0x0c, 0xa9, 0xd6, 0xf1, 0xf9, 0x6c, 0x34, 0x9c, 0x57, 0x1f, 0xbd, 0x0a, + 0xa3, 0xe2, 0x29, 0x25, 0x29, 0xf6, 0x9b, 0x86, 0x84, 0x57, 0x0c, 0x28, 0x4e, 0x60, 0xcb, 0xa4, + 0x14, 0xec, 0x8d, 0x21, 0x29, 0x0c, 0xa4, 0x93, 0x52, 0xe8, 0x70, 0x9c, 0xaa, 0x81, 0x66, 0x61, + 0x8c, 0xb3, 0x8e, 0xae, 0xb7, 0xc1, 0xe7, 0x44, 0x78, 0x76, 0xaa, 0x4d, 0x75, 0xcd, 0x04, 0xe3, + 0x24, 0x3e, 0xba, 0x08, 0xc3, 0x4e, 0x50, 0xdd, 0x74, 0x23, 0x52, 0xa5, 0x3b, 0x83, 0xd9, 0xf2, + 0x69, 0x96, 0x98, 0xb3, 0x1a, 0x0c, 0x1b, 0x98, 0xf6, 0x5d, 0x98, 0xcc, 0x08, 0x2f, 0x43, 0x17, + 0x8e, 0xd3, 0x74, 0xe5, 0x37, 0x25, 0xdc, 0x1d, 0x66, 0xcb, 0xcb, 0xf2, 0x6b, 0x34, 0x2c, 0xba, + 0x3a, 0x59, 0x18, 0x1a, 0x2d, 0xf9, 0xb6, 0x5a, 0x9d, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0xaf, + 0x0a, 0x30, 0x96, 0xa1, 0xa0, 0x63, 0x09, 0xa0, 0x13, 0x2f, 0xad, 0x38, 0xdf, 0xb3, 0x99, 0xe3, + 0xa4, 0x70, 0x80, 0x1c, 0x27, 0xc5, 0x4e, 0x39, 0x4e, 0x7a, 0xde, 0x49, 0x8e, 0x13, 0x73, 0xc4, + 0x7a, 0xbb, 0x1a, 0xb1, 0x8c, 0xbc, 0x28, 0x7d, 0x07, 0xcc, 0x8b, 0x62, 0x0c, 0x7a, 0x7f, 0x17, + 0x83, 0xfe, 0xa3, 0x05, 0x18, 0x4f, 0xea, 0xf6, 0x0e, 0x41, 0x3e, 0xfe, 0xba, 0x21, 0x1f, 0x3f, + 0xd7, 0x8d, 0x27, 0x7e, 0xae, 0xac, 0x1c, 0x27, 0x64, 0xe5, 0x4f, 0x76, 0x45, 0xad, 0xbd, 0xdc, + 0xfc, 0xef, 0x17, 0xe0, 0x68, 0xa6, 0xca, 0xf3, 0x10, 0xc6, 0xe6, 0x9a, 0x31, 0x36, 0xcf, 0x74, + 0x1d, 0xa5, 0x20, 0x77, 0x80, 0x6e, 0x26, 0x06, 0xe8, 0x7c, 0xf7, 0x24, 0xdb, 0x8f, 0xd2, 0x37, + 0x8a, 0x70, 0x3a, 0xb3, 0x5e, 0x2c, 0x5e, 0x5e, 0x32, 0xc4, 0xcb, 0xcf, 0x26, 0xc4, 0xcb, 0x76, + 0xfb, 0xda, 0xf7, 0x47, 0xde, 0x2c, 0xbc, 0xf5, 0x59, 0xcc, 0x91, 0x7b, 0x94, 0x35, 0x1b, 0xde, + 0xfa, 0x8a, 0x10, 0x36, 0xe9, 0xbe, 0x97, 0x64, 0xcc, 0xbf, 0x6b, 0xc1, 0x89, 0xcc, 0xb9, 0x39, + 0x04, 0x49, 0xdf, 0xaa, 0x29, 0xe9, 0x7b, 0xa2, 0xeb, 0xd5, 0x9a, 0x23, 0xfa, 0xfb, 0x7c, 0x5f, + 0xce, 0xb7, 0x30, 0x01, 0xc4, 0x35, 0x18, 0x72, 0xaa, 0x55, 0x12, 0x86, 0x2b, 0x7e, 0x4d, 0xa5, + 0x43, 0x78, 0x86, 0x3d, 0x0f, 0xe3, 0xe2, 0xfd, 0xdd, 0xd2, 0x74, 0x92, 0x44, 0x0c, 0xc6, 0x3a, + 0x05, 0xf4, 0x09, 0x18, 0x08, 0x65, 0x26, 0xcb, 0x9e, 0x7b, 0xcf, 0x64, 0xc9, 0x98, 0x5c, 0x25, + 0x60, 0x51, 0x24, 0xd1, 0x77, 0xea, 0xd1, 0x9f, 0xda, 0x88, 0x16, 0x79, 0x27, 0xef, 0x21, 0x06, + 0xd4, 0xb3, 0x00, 0xdb, 0xea, 0x25, 0x93, 0x14, 0x9e, 0x68, 0x6f, 0x1c, 0x0d, 0x0b, 0xbd, 0x06, + 0xe3, 0x21, 0x0f, 0x7c, 0x1a, 0x1b, 0xa9, 0xf0, 0xb5, 0xc8, 0x62, 0xc7, 0x55, 0x12, 0x30, 0x9c, + 0xc2, 0x46, 0x4b, 0xb2, 0x55, 0x66, 0x8e, 0xc4, 0x97, 0xe7, 
0xd9, 0xb8, 0x45, 0x61, 0x92, 0x74, + 0x24, 0x39, 0x09, 0x6c, 0xf8, 0xb5, 0x9a, 0xe8, 0x13, 0x00, 0x74, 0x11, 0x09, 0x21, 0x4a, 0x7f, + 0xfe, 0x11, 0x4a, 0xcf, 0x96, 0x5a, 0xa6, 0x27, 0x03, 0x73, 0xb3, 0x5f, 0x50, 0x44, 0xb0, 0x46, + 0x10, 0x39, 0x30, 0x12, 0xff, 0x8b, 0x73, 0xb4, 0x9f, 0xcb, 0x6d, 0x21, 0x49, 0x9c, 0x29, 0x18, + 0x16, 0x74, 0x12, 0xd8, 0xa4, 0x88, 0x3e, 0x0e, 0x27, 0xb6, 0x73, 0x2d, 0x7f, 0x38, 0x27, 0xc8, + 0x92, 0xae, 0xe7, 0xdb, 0xfb, 0xe4, 0xd7, 0xb7, 0xff, 0x2d, 0xc0, 0x43, 0x6d, 0x4e, 0x7a, 0x34, + 0x6b, 0x6a, 0xed, 0x9f, 0x4a, 0x4a, 0x36, 0xa6, 0x33, 0x2b, 0x1b, 0xa2, 0x8e, 0xc4, 0x86, 0x2a, + 0xbc, 0xe3, 0x0d, 0xf5, 0x43, 0x96, 0x26, 0x73, 0xe2, 0x36, 0xdd, 0x1f, 0x39, 0xe0, 0x0d, 0x76, + 0x1f, 0x85, 0x50, 0xeb, 0x19, 0x92, 0x9c, 0x67, 0xbb, 0xee, 0x4e, 0xf7, 0xa2, 0x9d, 0xaf, 0x66, + 0x07, 0x7c, 0xe7, 0x42, 0x9e, 0x4b, 0x07, 0xfd, 0xfe, 0xc3, 0x0a, 0xfe, 0xfe, 0x75, 0x0b, 0x4e, + 0xa4, 0x8a, 0x79, 0x1f, 0x48, 0x28, 0xa2, 0xdd, 0xad, 0xbe, 0xe3, 0xce, 0x4b, 0x82, 0xfc, 0x1b, + 0x2e, 0x8b, 0x6f, 0x38, 0x91, 0x8b, 0x97, 0xec, 0xfa, 0x17, 0xfe, 0xb8, 0x34, 0xc9, 0x1a, 0x30, + 0x11, 0x71, 0x7e, 0xd7, 0x51, 0x13, 0xce, 0x54, 0x5b, 0x41, 0x10, 0x2f, 0xd6, 0x8c, 0xcd, 0xc9, + 0xdf, 0x7a, 0x8f, 0xee, 0xed, 0x96, 0xce, 0xcc, 0x77, 0xc0, 0xc5, 0x1d, 0xa9, 0x21, 0x0f, 0x50, + 0x23, 0x65, 0x5f, 0xc7, 0x0e, 0x80, 0x1c, 0x39, 0x4c, 0xda, 0x1a, 0x8f, 0x5b, 0xca, 0x66, 0x58, + 0xe9, 0x65, 0x50, 0x3e, 0x5c, 0xe9, 0xc9, 0xb7, 0x26, 0x2e, 0xfd, 0xf4, 0x55, 0x38, 0xdd, 0x7e, + 0x31, 0x1d, 0x28, 0x94, 0xc3, 0x1f, 0x58, 0x70, 0xaa, 0x6d, 0xbc, 0xb0, 0x6f, 0xc3, 0xc7, 0x82, + 0xfd, 0x59, 0x0b, 0x1e, 0xce, 0xac, 0x91, 0x74, 0xc2, 0xab, 0xd2, 0x42, 0xcd, 0x1c, 0x35, 0x8e, + 0x9c, 0x23, 0x01, 0x38, 0xc6, 0x31, 0x2c, 0x36, 0x0b, 0x1d, 0x2d, 0x36, 0x7f, 0xcb, 0x82, 0xd4, + 0x55, 0x7f, 0x08, 0x9c, 0xe7, 0xb2, 0xc9, 0x79, 0x3e, 0xda, 0xcd, 0x68, 0xe6, 0x30, 0x9d, 0x7f, + 0x31, 0x06, 0xc7, 0x72, 0x3c, 0xb1, 0xb7, 0x61, 0x62, 0xa3, 0x4a, 0xcc, 0xd0, 0x1b, 0xed, 0x42, + 0xd2, 0xb5, 0x8d, 0xd3, 0x31, 0x77, 0x74, 0x6f, 0xb7, 0x34, 0x91, 0x42, 0xc1, 0xe9, 0x26, 0xd0, + 0x67, 0x2d, 0x38, 0xe2, 0xdc, 0x0e, 0x17, 0xe9, 0x0b, 0xc2, 0xad, 0xce, 0xd5, 0xfd, 0xea, 0x16, + 0x65, 0xcc, 0xe4, 0xb6, 0x7a, 0x3e, 0x53, 0x18, 0x7d, 0xb3, 0x92, 0xc2, 0x37, 0x9a, 0x9f, 0xda, + 0xdb, 0x2d, 0x1d, 0xc9, 0xc2, 0xc2, 0x99, 0x6d, 0x21, 0x2c, 0x32, 0x7e, 0x39, 0xd1, 0x66, 0xbb, + 0xe0, 0x30, 0x59, 0x2e, 0xf3, 0x9c, 0x25, 0x96, 0x10, 0xac, 0xe8, 0xa0, 0x4f, 0xc1, 0xe0, 0x86, + 0x8c, 0x03, 0x91, 0xc1, 0x72, 0xc7, 0x03, 0xd9, 0x3e, 0x3a, 0x06, 0x37, 0x81, 0x51, 0x48, 0x38, + 0x26, 0x8a, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0x8a, 0x10, 0x75, 0xd9, 0x96, 0xb8, 0xa6, 0xad, 0x33, + 0x0f, 0xc1, 0xb4, 0xba, 0x54, 0xc1, 0xb4, 0x22, 0xba, 0x0c, 0xc5, 0xe0, 0x56, 0x4d, 0x68, 0x52, + 0x32, 0x37, 0x29, 0x9e, 0x5b, 0xc8, 0xe9, 0x15, 0xa3, 0x84, 0xe7, 0x16, 0x30, 0x25, 0x81, 0xca, + 0xd0, 0xcb, 0xdc, 0x97, 0x05, 0x6b, 0x9b, 0xf9, 0x94, 0x6f, 0x13, 0x06, 0x80, 0x7b, 0x24, 0x32, + 0x04, 0xcc, 0x09, 0xa1, 0x35, 0xe8, 0xab, 0xba, 0x5e, 0x8d, 0x04, 0x82, 0x97, 0xfd, 0x60, 0xa6, + 0xce, 0x84, 0x61, 0xe4, 0xd0, 0xe4, 0x2a, 0x04, 0x86, 0x81, 0x05, 0x2d, 0x46, 0x95, 0x34, 0x37, + 0xd7, 0xe5, 0x8d, 0x95, 0x4d, 0x95, 0x34, 0x37, 0x97, 0x2a, 0x6d, 0xa9, 0x32, 0x0c, 0x2c, 0x68, + 0xa1, 0x97, 0xa0, 0xb0, 0x5e, 0x15, 0xae, 0xc9, 0x99, 0xca, 0x13, 0x33, 0x8a, 0xd6, 0x5c, 0xdf, + 0xde, 0x6e, 0xa9, 0xb0, 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xb4, 0x0a, 0xfd, 0xeb, 0x3c, 0xee, 0x8e, + 0xd0, 0x8f, 0x3c, 0x9e, 0x1d, 0x12, 0x28, 0x15, 0x9a, 0x87, 0x7b, 0x97, 0x0a, 0x00, 
0x96, 0x44, + 0x58, 0x02, 0x2a, 0x15, 0x3f, 0x48, 0x84, 0x2f, 0x9d, 0x39, 0x58, 0xcc, 0x27, 0xfe, 0xd4, 0x88, + 0xa3, 0x10, 0x61, 0x8d, 0x22, 0x5d, 0xd5, 0xce, 0xdd, 0x56, 0xc0, 0x72, 0x5b, 0x08, 0xd5, 0x48, + 0xe6, 0xaa, 0x9e, 0x95, 0x48, 0xed, 0x56, 0xb5, 0x42, 0xc2, 0x31, 0x51, 0xb4, 0x05, 0x23, 0xdb, + 0x61, 0x73, 0x93, 0xc8, 0x2d, 0xcd, 0xc2, 0xde, 0xe5, 0x70, 0xb3, 0x37, 0x04, 0xa2, 0x1b, 0x44, + 0x2d, 0xa7, 0x9e, 0x3a, 0x85, 0xd8, 0xb3, 0xe6, 0x86, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0xf0, 0xbf, + 0xdd, 0xf2, 0x6f, 0xed, 0x44, 0x44, 0x44, 0x1d, 0xcd, 0x1c, 0xfe, 0x37, 0x38, 0x4a, 0x7a, 0xf8, + 0x05, 0x00, 0x4b, 0x22, 0xe8, 0x86, 0x18, 0x1e, 0x76, 0x7a, 0x8e, 0xe7, 0x87, 0x34, 0x9f, 0x95, + 0x48, 0x39, 0x83, 0xc2, 0x4e, 0xcb, 0x98, 0x14, 0x3b, 0x25, 0x9b, 0x9b, 0x7e, 0xe4, 0x7b, 0x89, + 0x13, 0x7a, 0x22, 0xff, 0x94, 0x2c, 0x67, 0xe0, 0xa7, 0x4f, 0xc9, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, + 0xaa, 0xc1, 0x68, 0xd3, 0x0f, 0xa2, 0xdb, 0x7e, 0x20, 0xd7, 0x17, 0x6a, 0x23, 0x28, 0x35, 0x30, + 0x45, 0x8b, 0xcc, 0x30, 0xc7, 0x84, 0xe0, 0x04, 0x4d, 0xf4, 0x51, 0xe8, 0x0f, 0xab, 0x4e, 0x9d, + 0x2c, 0x5f, 0x9b, 0x9a, 0xcc, 0xbf, 0x7e, 0x2a, 0x1c, 0x25, 0x67, 0x75, 0xf1, 0xb0, 0x49, 0x1c, + 0x05, 0x4b, 0x72, 0x68, 0x09, 0x7a, 0x59, 0x62, 0x67, 0x16, 0x22, 0x37, 0x27, 0x32, 0x7b, 0xca, + 0xad, 0x86, 0x9f, 0x4d, 0xac, 0x18, 0xf3, 0xea, 0x74, 0x0f, 0x08, 0x49, 0x81, 0x1f, 0x4e, 0x1d, + 0xcd, 0xdf, 0x03, 0x42, 0xc0, 0x70, 0xad, 0xd2, 0x6e, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, + 0x33, 0x3d, 0x4d, 0x8f, 0xb5, 0x31, 0x99, 0xcc, 0x3d, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, + 0xc2, 0xfe, 0x8d, 0x81, 0x34, 0xcf, 0xc2, 0x24, 0x4c, 0xff, 0xbf, 0x95, 0xb2, 0x99, 0xf8, 0x50, + 0xb7, 0x02, 0xef, 0xfb, 0xf8, 0x70, 0xfd, 0xac, 0x05, 0xc7, 0x9a, 0x99, 0x1f, 0x22, 0x18, 0x80, + 0xee, 0xe4, 0xe6, 0xfc, 0xd3, 0x55, 0x38, 0xe5, 0x6c, 0x38, 0xce, 0x69, 0x29, 0x29, 0x1c, 0x28, + 0xbe, 0x63, 0xe1, 0xc0, 0x0a, 0x0c, 0x54, 0xf9, 0x4b, 0x4e, 0xa6, 0x01, 0xe8, 0x2a, 0x18, 0x28, + 0x63, 0x25, 0xc4, 0x13, 0x70, 0x1d, 0x2b, 0x12, 0xe8, 0x87, 0x2d, 0x38, 0x95, 0xec, 0x3a, 0x26, + 0x0c, 0x2c, 0x0c, 0x26, 0xb9, 0x58, 0x6b, 0x49, 0x7c, 0x7f, 0x8a, 0xff, 0x37, 0x90, 0xf7, 0x3b, + 0x21, 0xe0, 0xf6, 0x8d, 0xa1, 0x85, 0x0c, 0xb9, 0x5a, 0x9f, 0xa9, 0x51, 0xec, 0x42, 0xb6, 0xf6, + 0x3c, 0x0c, 0x37, 0xfc, 0x96, 0x17, 0x09, 0xbb, 0x47, 0x61, 0x3c, 0xc5, 0x8c, 0x86, 0x56, 0xb4, + 0x72, 0x6c, 0x60, 0x25, 0x24, 0x72, 0x03, 0xf7, 0x2c, 0x91, 0x7b, 0x0b, 0x86, 0x3d, 0xcd, 0x25, + 0xa0, 0xdd, 0x0b, 0x56, 0x48, 0x17, 0x35, 0x6c, 0xde, 0x4b, 0xbd, 0x04, 0x1b, 0xd4, 0xda, 0x4b, + 0xcb, 0xe0, 0x9d, 0x49, 0xcb, 0x0e, 0xf5, 0x49, 0x6c, 0xff, 0x42, 0x21, 0xe3, 0xc5, 0xc0, 0xa5, + 0x72, 0xaf, 0x98, 0x52, 0xb9, 0xb3, 0x49, 0xa9, 0x5c, 0x4a, 0x55, 0x65, 0x08, 0xe4, 0xba, 0xcf, + 0x28, 0xd9, 0x75, 0x80, 0xe7, 0xef, 0xb5, 0xe0, 0x38, 0xd3, 0x7d, 0xd0, 0x06, 0xde, 0xb1, 0xbe, + 0x83, 0x99, 0xa4, 0x5e, 0xcd, 0x26, 0x87, 0xf3, 0xda, 0xb1, 0xeb, 0x70, 0xa6, 0xd3, 0xbd, 0xcb, + 0x2c, 0x7c, 0x6b, 0xca, 0x38, 0x22, 0xb6, 0xf0, 0xad, 0x2d, 0x2f, 0x60, 0x06, 0xe9, 0x36, 0x7c, + 0xa1, 0xfd, 0xdf, 0x2d, 0x28, 0x96, 0xfd, 0xda, 0x21, 0xbc, 0xe8, 0x3f, 0x62, 0xbc, 0xe8, 0x1f, + 0xca, 0xbe, 0xf1, 0x6b, 0xb9, 0xca, 0xbe, 0xc5, 0x84, 0xb2, 0xef, 0x54, 0x1e, 0x81, 0xf6, 0xaa, + 0xbd, 0x9f, 0x2e, 0xc2, 0x50, 0xd9, 0xaf, 0xa9, 0x7d, 0xf6, 0xaf, 0xee, 0xc5, 0x91, 0x27, 0x37, + 0xfb, 0x94, 0x46, 0x99, 0x59, 0xf4, 0xca, 0xb8, 0x13, 0xdf, 0x66, 0xfe, 0x3c, 0x37, 0x89, 0xbb, + 0xb1, 0x19, 0x91, 0x5a, 0xf2, 0x73, 0x0e, 0xcf, 0x9f, 0xe7, 0x9b, 0x45, 0x18, 0x4b, 0xb4, 0x8e, + 0xea, 0x30, 
0x52, 0xd7, 0x55, 0x49, 0x62, 0x9d, 0xde, 0x93, 0x16, 0x4a, 0xf8, 0x43, 0x68, 0x45, + 0xd8, 0x24, 0x8e, 0x66, 0x00, 0x3c, 0xdd, 0x2a, 0x5c, 0x05, 0x2a, 0xd6, 0x2c, 0xc2, 0x35, 0x0c, + 0xf4, 0x02, 0x0c, 0x45, 0x7e, 0xd3, 0xaf, 0xfb, 0x1b, 0x3b, 0x57, 0x88, 0x8c, 0x6c, 0xa9, 0x8c, + 0x86, 0xd7, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x1d, 0x98, 0x50, 0x44, 0x2a, 0xf7, 0x41, 0xbd, 0xc6, + 0xc4, 0x26, 0xab, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x09, 0x46, 0x99, 0xf5, 0x32, 0xab, 0x7f, + 0x85, 0xec, 0xc8, 0x88, 0xc7, 0x8c, 0xc3, 0x5e, 0x31, 0x20, 0x38, 0x81, 0x89, 0xe6, 0x61, 0xa2, + 0xe1, 0x86, 0x89, 0xea, 0x7d, 0xac, 0x3a, 0xeb, 0xc0, 0x4a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0xcf, + 0x89, 0x39, 0xf6, 0x22, 0xf7, 0xfd, 0xed, 0xf8, 0xee, 0xde, 0x8e, 0xdf, 0xb0, 0x60, 0x9c, 0xb6, + 0xce, 0x4c, 0x32, 0x25, 0x23, 0xa5, 0x72, 0x62, 0x58, 0x6d, 0x72, 0x62, 0x9c, 0xa5, 0xc7, 0x76, + 0xcd, 0x6f, 0x45, 0x42, 0x3a, 0xaa, 0x9d, 0xcb, 0xb4, 0x14, 0x0b, 0xa8, 0xc0, 0x23, 0x41, 0x20, + 0xfc, 0xde, 0x75, 0x3c, 0x12, 0x04, 0x58, 0x40, 0x65, 0xca, 0x8c, 0x9e, 0xec, 0x94, 0x19, 0x3c, + 0xf2, 0xb9, 0xb0, 0x82, 0x13, 0x2c, 0xad, 0x16, 0xf9, 0x5c, 0x9a, 0xc7, 0xc5, 0x38, 0xf6, 0x57, + 0x8b, 0x30, 0x5c, 0xf6, 0x6b, 0xb1, 0x61, 0xc7, 0xf3, 0x86, 0x61, 0xc7, 0x99, 0x84, 0x61, 0xc7, + 0xb8, 0x8e, 0xfb, 0xbe, 0x19, 0xc7, 0xb7, 0xca, 0x8c, 0xe3, 0x37, 0x2d, 0x36, 0x6b, 0x0b, 0xab, + 0x15, 0x6e, 0xe1, 0x8b, 0x2e, 0xc0, 0x10, 0x3b, 0xe1, 0x58, 0xa0, 0x05, 0x69, 0xed, 0xc0, 0x52, + 0x58, 0xae, 0xc6, 0xc5, 0x58, 0xc7, 0x41, 0xe7, 0x60, 0x20, 0x24, 0x4e, 0x50, 0xdd, 0x54, 0xc7, + 0xbb, 0x30, 0x4d, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x1b, 0x71, 0xd0, 0xed, 0x62, 0xbe, 0xb9, 0xb0, + 0xde, 0x1f, 0xbe, 0x45, 0xf2, 0x23, 0x6d, 0xdb, 0x37, 0x01, 0xa5, 0xf1, 0xbb, 0xf0, 0xbf, 0x2a, + 0x99, 0x61, 0x61, 0x07, 0x53, 0x21, 0x61, 0xff, 0xda, 0x82, 0xd1, 0xb2, 0x5f, 0xa3, 0x5b, 0xf7, + 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x02, 0xbd, 0x65, 0xbf, 0xd6, + 0x21, 0x74, 0xed, 0x3f, 0xb0, 0xa0, 0xbf, 0xec, 0xd7, 0x0e, 0x41, 0xf1, 0xf2, 0x8a, 0xa9, 0x78, + 0x39, 0x9e, 0xb3, 0x6e, 0x72, 0x74, 0x2d, 0x7f, 0xaf, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x0d, 0x39, + 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x3b, 0x39, 0xad, + 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x06, 0x9a, 0x01, 0xd9, 0x76, 0x7d, 0xc1, 0x5f, 0x6b, + 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, 0xf7, + 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x9b, 0x30, 0xc8, 0xfe, + 0xb3, 0x63, 0xa7, 0xf7, 0xc0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, 0x16, + 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, 0x58, + 0xe8, 0x29, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xd5, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, 0xeb, + 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x89, 0x44, 0x6a, 0xba, 0x22, + 0xe7, 0x05, 0xaf, 0xaa, 0x52, 0xac, 0x61, 0xa0, 0x4d, 0x38, 0xe9, 0x7a, 0x2c, 0x85, 0x14, 0xa9, + 0x6c, 0xb9, 0xcd, 0xb5, 0xab, 0x95, 0x1b, 0x24, 0x70, 0xd7, 0x77, 0xe6, 0x9c, 0xea, 0x16, 0xf1, + 0x64, 0x42, 0xfc, 0x47, 0x45, 0x17, 0x4f, 0x2e, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0xd9, 0xcf, 0xb1, + 0xf5, 0x7e, 0xad, 0x82, 0x9e, 0x34, 0x8e, 0x8e, 0x63, 0xfa, 0xd1, 0xb1, 0xbf, 0x5b, 0xea, 0xbb, + 0x56, 0xd1, 0x62, 0xff, 0x5c, 0x84, 0xa3, 0x65, 0xbf, 0x56, 0xf6, 0x83, 0x68, 0xc9, 0x0f, 0x6e, + 0x3b, 0x41, 0x4d, 0x2e, 0xaf, 0x92, 0x8c, 0x7e, 0x44, 0xcf, 0xcf, 0x5e, 0x7e, 0xba, 0x18, 0x91, + 0x8d, 0x9e, 0x63, 0x1c, 0xdb, 0x01, 
0x9d, 0x4d, 0xab, 0x8c, 0x77, 0x50, 0x49, 0xd8, 0x2e, 0x39, + 0x11, 0x41, 0xd7, 0x60, 0xa4, 0xaa, 0x5f, 0xa3, 0xa2, 0xfa, 0x13, 0xf2, 0x22, 0x33, 0xee, 0xd8, + 0xcc, 0x7b, 0xd7, 0xac, 0x6f, 0x7f, 0xdd, 0x12, 0xad, 0x70, 0x49, 0x04, 0xb7, 0x69, 0xed, 0x7c, + 0x9e, 0xce, 0xc3, 0x44, 0xa0, 0x57, 0xd1, 0x6c, 0xc3, 0x8e, 0xf2, 0xac, 0x36, 0x09, 0x20, 0x4e, + 0xe3, 0xa3, 0x8f, 0xc3, 0x09, 0xa3, 0x50, 0xaa, 0xc9, 0xb5, 0xdc, 0xd2, 0x4c, 0x56, 0x83, 0xf3, + 0x90, 0x70, 0x7e, 0x7d, 0xfb, 0xbb, 0xe1, 0x58, 0xf2, 0xbb, 0x84, 0xf4, 0xe4, 0x1e, 0xbf, 0xae, + 0x70, 0xb0, 0xaf, 0xb3, 0x5f, 0x80, 0x09, 0xfa, 0xac, 0x56, 0x2c, 0x22, 0x9b, 0xbf, 0xce, 0x01, + 0xa6, 0xfe, 0x5d, 0x3f, 0xbb, 0xe2, 0x12, 0x99, 0xd5, 0xd0, 0x27, 0x61, 0x34, 0x24, 0x2c, 0xaa, + 0x9a, 0x94, 0xda, 0xb5, 0xf1, 0x14, 0xaf, 0x2c, 0xea, 0x98, 0xfc, 0x65, 0x62, 0x96, 0xe1, 0x04, + 0x35, 0xd4, 0x80, 0xd1, 0xdb, 0xae, 0x57, 0xf3, 0x6f, 0x87, 0x92, 0xfe, 0x40, 0xbe, 0x0a, 0xe0, + 0x26, 0xc7, 0x4c, 0xf4, 0xd1, 0x68, 0xee, 0xa6, 0x41, 0x0c, 0x27, 0x88, 0xd3, 0x63, 0x24, 0x68, + 0x79, 0xb3, 0xe1, 0xf5, 0x90, 0x04, 0x22, 0xe6, 0x1b, 0x3b, 0x46, 0xb0, 0x2c, 0xc4, 0x31, 0x9c, + 0x1e, 0x23, 0xec, 0x0f, 0x73, 0x35, 0x67, 0xe7, 0x94, 0x38, 0x46, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, + 0x3d, 0x66, 0xd9, 0xbf, 0x55, 0xdf, 0xc3, 0xbe, 0x1f, 0xc9, 0x83, 0x99, 0xa5, 0xa1, 0xd4, 0xca, + 0xb1, 0x81, 0x95, 0x13, 0x61, 0xae, 0xe7, 0xa0, 0x11, 0xe6, 0x50, 0xd4, 0xc6, 0xbb, 0x9e, 0x47, + 0x3a, 0xbe, 0xd8, 0xce, 0xbb, 0x7e, 0xff, 0x9e, 0x3c, 0xef, 0xe9, 0x3d, 0xbf, 0x2e, 0x06, 0xa8, + 0x97, 0x87, 0xd0, 0x63, 0x4a, 0xca, 0x0a, 0x1f, 0x1d, 0x09, 0x43, 0x8b, 0xd0, 0x1f, 0xee, 0x84, + 0xd5, 0xa8, 0x1e, 0xb6, 0x4b, 0x35, 0x5a, 0x61, 0x28, 0x5a, 0xa6, 0x6b, 0x5e, 0x05, 0xcb, 0xba, + 0xa8, 0x0a, 0x93, 0x82, 0xe2, 0xfc, 0xa6, 0xe3, 0xa9, 0x04, 0x88, 0xdc, 0x1a, 0xf1, 0xc2, 0xde, + 0x6e, 0x69, 0x52, 0xb4, 0xac, 0x83, 0xf7, 0x77, 0x4b, 0x74, 0x4b, 0x66, 0x40, 0x70, 0x16, 0x35, + 0xbe, 0xe4, 0xab, 0x55, 0xbf, 0xd1, 0x2c, 0x07, 0xfe, 0xba, 0x5b, 0x27, 0xed, 0x14, 0xbd, 0x15, + 0x03, 0x53, 0x2c, 0x79, 0xa3, 0x0c, 0x27, 0xa8, 0xa1, 0x5b, 0x30, 0xe6, 0x34, 0x9b, 0xb3, 0x41, + 0xc3, 0x0f, 0x64, 0x03, 0x43, 0xf9, 0x1a, 0x83, 0x59, 0x13, 0x95, 0xe7, 0x3f, 0x4c, 0x14, 0xe2, + 0x24, 0x41, 0xfb, 0xbb, 0x18, 0xbf, 0x5d, 0x71, 0x37, 0x3c, 0xe6, 0x93, 0x86, 0x1a, 0x30, 0xd2, + 0x64, 0x27, 0xb2, 0x48, 0x1b, 0x26, 0x76, 0xf1, 0xf3, 0x5d, 0xca, 0x0c, 0x6f, 0xb3, 0xc4, 0xa7, + 0x86, 0xed, 0x68, 0x59, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0xbf, 0x3f, 0xc1, 0x38, 0xb6, 0x0a, 0x17, + 0x04, 0xf6, 0x0b, 0x0f, 0x41, 0xf1, 0xf4, 0x9f, 0xce, 0x17, 0xb9, 0xc7, 0x53, 0x2f, 0xbc, 0x0c, + 0xb1, 0xac, 0x8b, 0x3e, 0x01, 0xa3, 0xf4, 0x25, 0xad, 0xb8, 0xa6, 0x70, 0xea, 0x48, 0x7e, 0xe8, + 0x29, 0x85, 0xa5, 0xa7, 0x14, 0xd4, 0x2b, 0xe3, 0x04, 0x31, 0xf4, 0x06, 0x33, 0xa7, 0x94, 0xa4, + 0x0b, 0xdd, 0x90, 0xd6, 0x2d, 0x27, 0x25, 0x59, 0x8d, 0x08, 0x6a, 0xc1, 0x64, 0x3a, 0x71, 0x72, + 0x38, 0x65, 0xe7, 0x3f, 0x49, 0xd2, 0xb9, 0x8f, 0xe3, 0xdc, 0x6f, 0x69, 0x58, 0x88, 0xb3, 0xe8, + 0xa3, 0xab, 0xc9, 0xb4, 0xb6, 0x45, 0x43, 0x58, 0x9f, 0x4a, 0x6d, 0x3b, 0xd2, 0x36, 0xa3, 0xed, + 0x06, 0x9c, 0xd2, 0x32, 0x83, 0x5e, 0x0a, 0x1c, 0x66, 0xce, 0xe3, 0xb2, 0x8b, 0x42, 0xe3, 0x25, + 0x1f, 0xde, 0xdb, 0x2d, 0x9d, 0x5a, 0x6b, 0x87, 0x88, 0xdb, 0xd3, 0x41, 0xd7, 0xe0, 0x28, 0x0f, + 0x9c, 0xb2, 0x40, 0x9c, 0x5a, 0xdd, 0xf5, 0x14, 0xb3, 0xca, 0x8f, 0x95, 0x13, 0x7b, 0xbb, 0xa5, + 0xa3, 0xb3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x05, 0x06, 0x6b, 0x9e, 0x3c, 0x00, 0xfb, 0x8c, + 0xe4, 0xab, 0x83, 0x0b, 0xab, 0x15, 0xf5, 0xfd, 0xf1, 0x1f, 
0x1c, 0x57, 0x40, 0x1b, 0x5c, 0x5b, + 0xa4, 0x44, 0x7c, 0xfd, 0xa9, 0x78, 0x9a, 0x49, 0x29, 0xb8, 0x11, 0x89, 0x80, 0xab, 0x49, 0x95, + 0xa7, 0x9b, 0x11, 0xa4, 0xc0, 0x20, 0x8c, 0x5e, 0x07, 0x24, 0x92, 0xfc, 0xcc, 0x56, 0x59, 0x4e, + 0x3a, 0xcd, 0x84, 0x53, 0xbd, 0xdc, 0x2b, 0x29, 0x0c, 0x9c, 0x51, 0x0b, 0x5d, 0xa6, 0x27, 0x97, + 0x5e, 0x2a, 0x4e, 0x46, 0x95, 0xe2, 0x7b, 0x81, 0x34, 0x03, 0xc2, 0xac, 0x0e, 0x4d, 0x8a, 0x38, + 0x51, 0x0f, 0xd5, 0xe0, 0xa4, 0xd3, 0x8a, 0x7c, 0xa6, 0x88, 0x33, 0x51, 0xd7, 0xfc, 0x2d, 0xe2, + 0x31, 0x1d, 0xf8, 0x00, 0x8b, 0xd3, 0x79, 0x72, 0xb6, 0x0d, 0x1e, 0x6e, 0x4b, 0x85, 0xbe, 0x62, + 0xe8, 0x58, 0x68, 0x3a, 0x32, 0xc3, 0xa9, 0x9a, 0x2b, 0x8e, 0x25, 0x06, 0x7a, 0x01, 0x86, 0x36, + 0xfd, 0x30, 0x5a, 0x25, 0xd1, 0x6d, 0x3f, 0xd8, 0x12, 0x59, 0x05, 0xe2, 0x4c, 0x2e, 0x31, 0x08, + 0xeb, 0x78, 0xe8, 0x09, 0xe8, 0x67, 0x16, 0x5a, 0xcb, 0x0b, 0xec, 0x1a, 0x1c, 0x88, 0xcf, 0x98, + 0xcb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0x2e, 0xcf, 0x33, 0x43, 0x97, 0x04, 0xea, 0x72, 0x79, + 0x1e, 0x4b, 0x38, 0x5d, 0xae, 0xe1, 0xa6, 0x13, 0x90, 0x72, 0xe0, 0x57, 0x49, 0xa8, 0xe5, 0x0f, + 0x7a, 0x88, 0xe7, 0x4c, 0xa0, 0xcb, 0xb5, 0x92, 0x85, 0x80, 0xb3, 0xeb, 0x21, 0x92, 0xce, 0x8a, + 0x3b, 0x9a, 0xaf, 0xa1, 0x4c, 0x73, 0x6a, 0x5d, 0x26, 0xc6, 0xf5, 0x60, 0x5c, 0xe5, 0xe3, 0xe5, + 0x59, 0x12, 0xc2, 0xa9, 0x31, 0xb6, 0xb6, 0xbb, 0x4f, 0xb1, 0xa0, 0x74, 0xbe, 0xcb, 0x09, 0x4a, + 0x38, 0x45, 0xdb, 0x08, 0x04, 0x3b, 0xde, 0x31, 0x10, 0xec, 0x79, 0x18, 0x0c, 0x5b, 0xb7, 0x6a, + 0x7e, 0xc3, 0x71, 0x3d, 0x66, 0xe8, 0xa2, 0xbd, 0x97, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xb4, 0x04, + 0x03, 0x8e, 0x54, 0xe8, 0xa2, 0xfc, 0x18, 0x77, 0x4a, 0x8d, 0xcb, 0xc3, 0x3e, 0x49, 0x15, 0xae, + 0xaa, 0x8b, 0x5e, 0x86, 0x11, 0x11, 0x47, 0x43, 0xa4, 0xb0, 0x9f, 0x34, 0xbd, 0x86, 0x2b, 0x3a, + 0x10, 0x9b, 0xb8, 0xe8, 0x3a, 0x0c, 0x45, 0x7e, 0x9d, 0xb9, 0xbe, 0x52, 0x06, 0xf6, 0x58, 0x7e, + 0x28, 0xda, 0x35, 0x85, 0xa6, 0xab, 0x1a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x6b, 0x7c, 0xbd, 0xb3, + 0x6c, 0x41, 0x24, 0x14, 0x39, 0xd0, 0x4f, 0xe5, 0x59, 0x29, 0x32, 0x34, 0x73, 0x3b, 0x88, 0x9a, + 0x58, 0x27, 0x83, 0x2e, 0xc1, 0x44, 0x33, 0x70, 0x7d, 0xb6, 0x26, 0x94, 0x82, 0x7a, 0xca, 0xcc, + 0x0d, 0x5a, 0x4e, 0x22, 0xe0, 0x74, 0x1d, 0x16, 0x06, 0x45, 0x14, 0x4e, 0x9d, 0xe0, 0xf9, 0xcd, + 0xb8, 0xf8, 0x81, 0x97, 0x61, 0x05, 0x45, 0x2b, 0xec, 0x24, 0xe6, 0x92, 0xb3, 0xa9, 0xe9, 0x7c, + 0xe7, 0x7a, 0x5d, 0xc2, 0xc6, 0xd9, 0x72, 0xf5, 0x17, 0xc7, 0x14, 0x50, 0x4d, 0x4b, 0x2b, 0x4e, + 0x1f, 0x37, 0xe1, 0xd4, 0xc9, 0x36, 0x66, 0xb2, 0x89, 0x97, 0x6c, 0xcc, 0x10, 0x18, 0xc5, 0x21, + 0x4e, 0xd0, 0x44, 0xaf, 0xc1, 0xb8, 0x88, 0x11, 0x10, 0x0f, 0xd3, 0xa9, 0xd8, 0x95, 0x08, 0x27, + 0x60, 0x38, 0x85, 0xcd, 0xf3, 0x8b, 0x39, 0xb7, 0xea, 0x44, 0x1c, 0x7d, 0x57, 0x5d, 0x6f, 0x2b, + 0x9c, 0x3a, 0xcd, 0xce, 0x07, 0x91, 0x5f, 0x2c, 0x09, 0xc5, 0x19, 0x35, 0xd0, 0x1a, 0x8c, 0x37, + 0x03, 0x42, 0x1a, 0xec, 0x09, 0x23, 0xee, 0xb3, 0x12, 0x8f, 0x02, 0x44, 0x7b, 0x52, 0x4e, 0xc0, + 0xf6, 0x33, 0xca, 0x70, 0x8a, 0x02, 0xba, 0x0d, 0x03, 0xfe, 0x36, 0x09, 0x36, 0x89, 0x53, 0x9b, + 0x3a, 0xd3, 0xc6, 0xc1, 0x4d, 0x5c, 0x6e, 0xd7, 0x04, 0x6e, 0xc2, 0xfe, 0x47, 0x16, 0x77, 0xb6, + 0xff, 0x91, 0x8d, 0xa1, 0xbf, 0x65, 0xc1, 0x09, 0xa9, 0x51, 0xab, 0x34, 0xe9, 0xa8, 0xcf, 0xfb, + 0x5e, 0x18, 0x05, 0x3c, 0x6e, 0xcd, 0xc3, 0xf9, 0xb1, 0x5c, 0xd6, 0x72, 0x2a, 0x29, 0xe1, 0xfd, + 0x89, 0x3c, 0x8c, 0x10, 0xe7, 0xb7, 0x48, 0x1f, 0xdd, 0x21, 0x89, 0xe4, 0x61, 0x34, 0x1b, 0x2e, + 0xbd, 0xb1, 0xb0, 0x3a, 0xf5, 0x08, 0x0f, 0xba, 0x43, 0x37, 0x43, 0x25, 0x09, 0xc4, 
0x69, 0x7c, + 0x74, 0x01, 0x0a, 0x7e, 0x38, 0xf5, 0x68, 0x9b, 0x4c, 0xf4, 0x7e, 0xed, 0x5a, 0x85, 0xdb, 0x81, + 0x5e, 0xab, 0xe0, 0x82, 0x1f, 0xca, 0x1c, 0x5f, 0xf4, 0xa5, 0x19, 0x4e, 0x3d, 0xc6, 0x45, 0xbd, + 0x32, 0xc7, 0x17, 0x2b, 0xc4, 0x31, 0x1c, 0x6d, 0xc2, 0x58, 0x68, 0xbc, 0xe8, 0xc3, 0xa9, 0xb3, + 0x6c, 0xa4, 0x1e, 0xcb, 0x9b, 0x34, 0x03, 0x5b, 0x4b, 0xbe, 0x63, 0x52, 0xc1, 0x49, 0xb2, 0x7c, + 0x77, 0x69, 0x32, 0x85, 0x70, 0xea, 0xf1, 0x0e, 0xbb, 0x4b, 0x43, 0xd6, 0x77, 0x97, 0x4e, 0x03, + 0x27, 0x68, 0x4e, 0x7f, 0x07, 0x4c, 0xa4, 0xd8, 0xa5, 0x83, 0xf8, 0x3c, 0x4c, 0x6f, 0xc1, 0x88, + 0xb1, 0x24, 0x1f, 0xa8, 0x49, 0xcc, 0xef, 0x0e, 0xc2, 0xa0, 0x32, 0x55, 0x40, 0xe7, 0x4d, 0x2b, + 0x98, 0x13, 0x49, 0x2b, 0x98, 0x81, 0xb2, 0x5f, 0x33, 0x0c, 0x5f, 0xd6, 0x32, 0x62, 0xc9, 0xe6, + 0x1d, 0x80, 0xdd, 0x3b, 0x66, 0x69, 0xea, 0x97, 0x62, 0xd7, 0xe6, 0x34, 0x3d, 0x6d, 0x35, 0x3a, + 0x97, 0x60, 0xc2, 0xf3, 0x19, 0x8f, 0x4e, 0x6a, 0x92, 0x01, 0x63, 0x7c, 0xd6, 0xa0, 0x1e, 0xeb, + 0x2c, 0x81, 0x80, 0xd3, 0x75, 0x68, 0x83, 0x9c, 0x51, 0x4a, 0xaa, 0x90, 0x38, 0x1f, 0x85, 0x05, + 0x94, 0xbe, 0x0d, 0xf9, 0xaf, 0x70, 0x6a, 0x3c, 0xff, 0x6d, 0xc8, 0x2b, 0x25, 0x99, 0xb1, 0x50, + 0x32, 0x63, 0x4c, 0x63, 0xd2, 0xf4, 0x6b, 0xcb, 0x65, 0xc1, 0xe6, 0x6b, 0x51, 0xde, 0x6b, 0xcb, + 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x43, 0x26, 0x6f, 0x9b, 0x2e, 0x97, 0xb5, + 0x1c, 0xa3, 0xac, 0x02, 0x16, 0x15, 0x99, 0x44, 0x9c, 0xbe, 0x8d, 0x98, 0x44, 0xbc, 0xff, 0x1e, + 0x25, 0xe2, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0x3b, 0x70, 0xd4, 0x78, 0x8f, 0x2a, 0x4f, 0x35, 0xc8, + 0x57, 0x96, 0x27, 0x90, 0xe7, 0x4e, 0x89, 0x4e, 0x1f, 0x5d, 0xce, 0xa2, 0x84, 0xb3, 0x1b, 0x40, + 0x75, 0x98, 0xa8, 0xa6, 0x5a, 0x1d, 0xe8, 0xbe, 0x55, 0xb5, 0x2e, 0xd2, 0x2d, 0xa6, 0x09, 0xa3, + 0x97, 0x61, 0xe0, 0x6d, 0x9f, 0x1b, 0xb6, 0x89, 0xa7, 0x89, 0x8c, 0x92, 0x32, 0xf0, 0xc6, 0xb5, + 0x0a, 0x2b, 0xdf, 0xdf, 0x2d, 0x0d, 0x95, 0xfd, 0x9a, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x03, 0x16, + 0x4c, 0xa7, 0x1f, 0xbc, 0xaa, 0xd3, 0x23, 0xdd, 0x77, 0xda, 0x16, 0x8d, 0x4e, 0x2f, 0xe6, 0x92, + 0xc3, 0x6d, 0x9a, 0x42, 0x1f, 0xa6, 0xfb, 0x29, 0x74, 0xef, 0x12, 0x91, 0xa0, 0xfd, 0xe1, 0x78, + 0x3f, 0xd1, 0xd2, 0xfd, 0xdd, 0xd2, 0x18, 0x3f, 0x19, 0xdd, 0xbb, 0x2a, 0x1e, 0x3d, 0xaf, 0x80, + 0xbe, 0x1b, 0x8e, 0x06, 0x69, 0xd9, 0x30, 0x91, 0x4c, 0xf8, 0x93, 0xdd, 0x9c, 0xb2, 0xc9, 0x09, + 0xc7, 0x59, 0x04, 0x71, 0x76, 0x3b, 0xf6, 0xaf, 0x59, 0x4c, 0x27, 0x20, 0xba, 0x45, 0xc2, 0x56, + 0x3d, 0x3a, 0x04, 0x63, 0xb2, 0x45, 0x43, 0xdf, 0x7e, 0xcf, 0xd6, 0x60, 0xff, 0xd2, 0x62, 0xd6, + 0x60, 0x87, 0xe8, 0xd7, 0xf6, 0x06, 0x0c, 0x44, 0xa2, 0x35, 0xd1, 0xf5, 0x3c, 0xcb, 0x15, 0xd9, + 0x29, 0x66, 0x11, 0xa7, 0x1e, 0x39, 0xb2, 0x14, 0x2b, 0x32, 0xf6, 0x3f, 0xe3, 0x33, 0x20, 0x21, + 0x87, 0xa0, 0xd6, 0x5c, 0x30, 0xd5, 0x9a, 0xa5, 0x0e, 0x5f, 0x90, 0xa3, 0xde, 0xfc, 0xa7, 0x66, + 0xbf, 0x99, 0x70, 0xef, 0xdd, 0x6e, 0x86, 0x68, 0x7f, 0xde, 0x02, 0x88, 0x13, 0x80, 0x74, 0x91, + 0x90, 0xf9, 0x22, 0x7d, 0xd6, 0xf8, 0x91, 0x5f, 0xf5, 0xeb, 0x42, 0xf5, 0x72, 0x32, 0xd6, 0xac, + 0xf2, 0xf2, 0x7d, 0xed, 0x37, 0x56, 0xd8, 0xa8, 0x24, 0x23, 0xf2, 0x16, 0x63, 0x5d, 0xbf, 0x11, + 0x8d, 0xf7, 0x4b, 0x16, 0x1c, 0xc9, 0x72, 0x92, 0xa0, 0x8f, 0x64, 0x2e, 0xe6, 0x54, 0x26, 0xa2, + 0x6a, 0x36, 0x6f, 0x88, 0x72, 0xac, 0x30, 0xba, 0xce, 0x74, 0x7d, 0xb0, 0xe4, 0x14, 0xd7, 0x60, + 0xa4, 0x1c, 0x10, 0x8d, 0xbf, 0x78, 0x35, 0xce, 0x9b, 0x33, 0x38, 0xf7, 0xf4, 0x81, 0x23, 0x0f, + 0xd9, 0x5f, 0x2e, 0xc0, 0x11, 0x6e, 0xe8, 0x34, 0xbb, 0xed, 0xbb, 0xb5, 0xb2, 0x5f, 0x13, 0xae, + 0xad, 0x6f, 
0xc2, 0x70, 0x53, 0x93, 0x4d, 0xb7, 0x0b, 0xb4, 0xae, 0xcb, 0xb0, 0x63, 0x69, 0x9a, + 0x5e, 0x8a, 0x0d, 0x5a, 0xa8, 0x06, 0xc3, 0x64, 0xdb, 0xad, 0x2a, 0x6b, 0x99, 0xc2, 0x81, 0x2f, + 0x69, 0xd5, 0xca, 0xa2, 0x46, 0x07, 0x1b, 0x54, 0xbb, 0x36, 0x4f, 0xd6, 0x58, 0xb4, 0x9e, 0x0e, + 0x16, 0x32, 0x3f, 0x6e, 0xc1, 0xf1, 0x9c, 0xb0, 0xec, 0xb4, 0xb9, 0xdb, 0xcc, 0xa4, 0x4c, 0x2c, + 0x5b, 0xd5, 0x1c, 0x37, 0x34, 0xc3, 0x02, 0x8a, 0x3e, 0x0a, 0xd0, 0x8c, 0x53, 0x52, 0x76, 0x88, + 0x5f, 0x6d, 0x44, 0xb2, 0xd5, 0x82, 0x92, 0xaa, 0xcc, 0x95, 0x1a, 0x2d, 0xfb, 0x4b, 0x3d, 0xd0, + 0xcb, 0x0c, 0x93, 0x50, 0x19, 0xfa, 0x37, 0x79, 0xcc, 0xbc, 0xb6, 0xf3, 0x46, 0x71, 0x65, 0x10, + 0xbe, 0x78, 0xde, 0xb4, 0x52, 0x2c, 0xc9, 0xa0, 0x15, 0x98, 0xe4, 0xe9, 0x36, 0xeb, 0x0b, 0xa4, + 0xee, 0xec, 0x48, 0xb1, 0x6f, 0x81, 0x7d, 0xaa, 0x12, 0x7f, 0x2f, 0xa7, 0x51, 0x70, 0x56, 0x3d, + 0xf4, 0x2a, 0x8c, 0xd2, 0x67, 0xb8, 0xdf, 0x8a, 0x24, 0x25, 0x9e, 0xdf, 0x52, 0xbd, 0x4c, 0xd6, + 0x0c, 0x28, 0x4e, 0x60, 0xa3, 0x97, 0x61, 0xa4, 0x99, 0x12, 0x70, 0xf7, 0xc6, 0x92, 0x20, 0x53, + 0xa8, 0x6d, 0xe2, 0x32, 0x3f, 0x89, 0x16, 0xf3, 0x0a, 0x59, 0xdb, 0x0c, 0x48, 0xb8, 0xe9, 0xd7, + 0x6b, 0x8c, 0x03, 0xee, 0xd5, 0xfc, 0x24, 0x12, 0x70, 0x9c, 0xaa, 0x41, 0xa9, 0xac, 0x3b, 0x6e, + 0xbd, 0x15, 0x90, 0x98, 0x4a, 0x9f, 0x49, 0x65, 0x29, 0x01, 0xc7, 0xa9, 0x1a, 0x9d, 0x25, 0xf7, + 0xfd, 0xf7, 0x47, 0x72, 0x6f, 0xff, 0x4c, 0x01, 0x8c, 0xa9, 0x7d, 0x0f, 0xe7, 0xdd, 0x7c, 0x05, + 0x7a, 0x36, 0x82, 0x66, 0x55, 0x18, 0xe1, 0x65, 0x7e, 0x59, 0x9c, 0xfd, 0x9f, 0x7f, 0x19, 0xfd, + 0x8f, 0x59, 0x2d, 0xba, 0xc7, 0x8f, 0x96, 0x03, 0x9f, 0x5e, 0x72, 0x32, 0xac, 0xa6, 0x72, 0x47, + 0xea, 0x97, 0x81, 0x35, 0xda, 0x04, 0xa0, 0x16, 0x3e, 0x15, 0x9c, 0x82, 0x61, 0xaf, 0x56, 0x11, + 0xe1, 0x73, 0x24, 0x15, 0x74, 0x01, 0x86, 0x44, 0x2a, 0x44, 0xe6, 0x35, 0xc3, 0x37, 0x13, 0xb3, + 0xaf, 0x5b, 0x88, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x60, 0x01, 0x26, 0x33, 0xdc, 0x1e, 0xf9, 0x35, + 0xb2, 0xe1, 0x86, 0x51, 0xb0, 0x93, 0xbc, 0x9c, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x9e, 0x55, 0xfc, + 0xa2, 0x4a, 0x5e, 0x4e, 0xc2, 0xad, 0x48, 0x40, 0x0f, 0x98, 0xaa, 0xff, 0x0c, 0xf4, 0xb4, 0x42, + 0x22, 0x63, 0xdd, 0xab, 0x6b, 0x9b, 0x29, 0xec, 0x19, 0x84, 0x3e, 0x01, 0x37, 0x94, 0x16, 0x5a, + 0x7b, 0x02, 0x72, 0x3d, 0x34, 0x87, 0xd1, 0xce, 0x45, 0xc4, 0x73, 0xbc, 0x48, 0x3c, 0x14, 0xe3, + 0x18, 0xc8, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0xc5, 0x22, 0x9c, 0xc8, 0x75, 0x84, 0xa6, 0x5d, 0x6f, + 0xf8, 0x9e, 0x1b, 0xf9, 0xca, 0x70, 0x91, 0xc7, 0x3d, 0x26, 0xcd, 0xcd, 0x15, 0x51, 0x8e, 0x15, + 0x06, 0x3a, 0x0b, 0xbd, 0x4c, 0x28, 0x9e, 0x4c, 0x83, 0x86, 0xe7, 0x16, 0x78, 0x44, 0x49, 0x0e, + 0xd6, 0x6e, 0xf5, 0x62, 0xdb, 0x5b, 0xfd, 0x11, 0xca, 0xc1, 0xf8, 0xf5, 0xe4, 0x85, 0x42, 0xbb, + 0xeb, 0xfb, 0x75, 0xcc, 0x80, 0xe8, 0x31, 0x31, 0x5e, 0x09, 0x4b, 0x3d, 0xec, 0xd4, 0xfc, 0x50, + 0x1b, 0xb4, 0x27, 0xa0, 0x7f, 0x8b, 0xec, 0x04, 0xae, 0xb7, 0x91, 0xb4, 0xe0, 0xbc, 0xc2, 0x8b, + 0xb1, 0x84, 0x9b, 0x59, 0xbf, 0xfb, 0xef, 0x47, 0xd6, 0x6f, 0x7d, 0x05, 0x0c, 0x74, 0x64, 0x4f, + 0x7e, 0xa8, 0x08, 0x63, 0x78, 0x6e, 0xe1, 0xfd, 0x89, 0xb8, 0x9e, 0x9e, 0x88, 0xfb, 0x91, 0x1c, + 0xfb, 0x60, 0xb3, 0xf1, 0xcb, 0x16, 0x8c, 0xb1, 0x84, 0x8c, 0x22, 0x8a, 0x89, 0xeb, 0x7b, 0x87, + 0xf0, 0x14, 0x78, 0x04, 0x7a, 0x03, 0xda, 0xa8, 0x98, 0x41, 0xb5, 0xc7, 0x59, 0x4f, 0x30, 0x87, + 0xa1, 0x93, 0xd0, 0xc3, 0xba, 0x40, 0x27, 0x6f, 0x98, 0x1f, 0xc1, 0x0b, 0x4e, 0xe4, 0x60, 0x56, + 0xca, 0xe2, 0x29, 0x62, 0xd2, 0xac, 0xbb, 0xbc, 0xd3, 0xb1, 0xc9, 0xc2, 0xbb, 0x23, 0x44, 0x4a, + 0x66, 0xd7, 0xde, 0x59, 0x3c, 0xc5, 
0x6c, 0x92, 0xed, 0x9f, 0xd9, 0x7f, 0x5e, 0x80, 0xd3, 0x99, + 0xf5, 0xba, 0x8e, 0xa7, 0xd8, 0xbe, 0xf6, 0x83, 0x4c, 0xdf, 0x56, 0x3c, 0x44, 0xfb, 0xf8, 0x9e, + 0x6e, 0xb9, 0xff, 0xde, 0x2e, 0xc2, 0x1c, 0x66, 0x0e, 0xd9, 0xbb, 0x24, 0xcc, 0x61, 0x66, 0xdf, + 0x72, 0xc4, 0x04, 0x7f, 0x53, 0xc8, 0xf9, 0x16, 0x26, 0x30, 0x38, 0x47, 0xcf, 0x19, 0x06, 0x0c, + 0xe5, 0x23, 0x9c, 0x9f, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x9a, 0x85, 0xb1, 0x86, 0xeb, 0xd1, 0xc3, + 0x67, 0xc7, 0x64, 0xc5, 0x95, 0x2e, 0x63, 0xc5, 0x04, 0xe3, 0x24, 0x3e, 0x72, 0xb5, 0x10, 0x88, + 0xfc, 0xeb, 0x5e, 0x3e, 0xd0, 0xae, 0x9b, 0x31, 0xcd, 0x39, 0xd4, 0x28, 0x66, 0x84, 0x43, 0x5c, + 0xd1, 0xe4, 0x44, 0xc5, 0xee, 0xe5, 0x44, 0xc3, 0xd9, 0x32, 0xa2, 0xe9, 0x97, 0x61, 0xe4, 0x9e, + 0x75, 0x23, 0xf6, 0x37, 0x8a, 0xf0, 0x50, 0x9b, 0x6d, 0xcf, 0xcf, 0x7a, 0x63, 0x0e, 0xb4, 0xb3, + 0x3e, 0x35, 0x0f, 0x65, 0x38, 0xb2, 0xde, 0xaa, 0xd7, 0x77, 0x98, 0x23, 0x18, 0xa9, 0x49, 0x0c, + 0xc1, 0x53, 0x4a, 0xe1, 0xc8, 0x91, 0xa5, 0x0c, 0x1c, 0x9c, 0x59, 0x93, 0x3e, 0xb1, 0xe8, 0x4d, + 0xb2, 0xa3, 0x48, 0x25, 0x9e, 0x58, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x82, 0x09, 0x67, 0xdb, + 0x71, 0x79, 0xfa, 0x0b, 0x49, 0x80, 0xbf, 0xb1, 0x94, 0x2c, 0x7a, 0x36, 0x89, 0x80, 0xd3, 0x75, + 0xd0, 0xeb, 0x80, 0xfc, 0x5b, 0xcc, 0xb9, 0xa4, 0x76, 0x89, 0x78, 0x42, 0xeb, 0xce, 0xe6, 0xae, + 0x18, 0x1f, 0x09, 0xd7, 0x52, 0x18, 0x38, 0xa3, 0x56, 0x22, 0x18, 0x5f, 0x5f, 0x7e, 0x30, 0xbe, + 0xf6, 0xe7, 0x62, 0xc7, 0xcc, 0x81, 0x6f, 0xc1, 0xc8, 0x41, 0x2d, 0xa6, 0x9f, 0x80, 0xfe, 0x40, + 0xe4, 0x64, 0x4f, 0x78, 0x5d, 0xcb, 0x8c, 0xd5, 0x12, 0x6e, 0xff, 0x17, 0x0b, 0x94, 0x2c, 0xd9, + 0x8c, 0xbb, 0xfd, 0x32, 0x33, 0xff, 0xe6, 0x52, 0x70, 0x2d, 0xd4, 0xd6, 0x51, 0xcd, 0xfc, 0x3b, + 0x06, 0x62, 0x13, 0x97, 0x2f, 0xb7, 0x30, 0x8e, 0xf0, 0x60, 0x3c, 0x20, 0x44, 0x58, 0x50, 0x85, + 0x81, 0x3e, 0x06, 0xfd, 0x35, 0x77, 0xdb, 0x0d, 0x85, 0x1c, 0xed, 0xc0, 0x7a, 0xbb, 0xf8, 0xfb, + 0x16, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x62, 0x81, 0xd2, 0x4b, 0x5e, 0x26, 0x4e, 0x3d, 0xda, + 0x44, 0xaf, 0x01, 0x48, 0x0a, 0x4a, 0xf6, 0x26, 0xad, 0xa5, 0x00, 0x2b, 0xc8, 0xbe, 0xf1, 0x0f, + 0x6b, 0x75, 0xd0, 0xab, 0xd0, 0xb7, 0xc9, 0x68, 0x89, 0x6f, 0x3b, 0xab, 0x54, 0x5d, 0xac, 0x74, + 0x7f, 0xb7, 0x74, 0xc4, 0x6c, 0x53, 0xde, 0x62, 0xbc, 0x96, 0xfd, 0x43, 0x85, 0x78, 0x4e, 0xdf, + 0x68, 0xf9, 0x91, 0x73, 0x08, 0x9c, 0xc8, 0x25, 0x83, 0x13, 0x79, 0x2c, 0x7b, 0xa1, 0x6a, 0x5d, + 0xca, 0xe5, 0x40, 0xae, 0x25, 0x38, 0x90, 0xc7, 0x3b, 0x93, 0x6a, 0xcf, 0x79, 0xfc, 0x73, 0x0b, + 0x26, 0x0c, 0xfc, 0x43, 0xb8, 0x00, 0x97, 0xcc, 0x0b, 0xf0, 0xe1, 0x8e, 0xdf, 0x90, 0x73, 0xf1, + 0x7d, 0x7f, 0x31, 0xd1, 0x77, 0x76, 0xe1, 0xbd, 0x0d, 0x3d, 0x9b, 0x4e, 0x50, 0x13, 0xef, 0xfa, + 0xf3, 0x5d, 0x8d, 0xf5, 0xcc, 0x65, 0x27, 0x10, 0x96, 0x16, 0x4f, 0xcb, 0x51, 0xa7, 0x45, 0x1d, + 0xad, 0x2c, 0x58, 0x53, 0xe8, 0x22, 0xf4, 0x85, 0x55, 0xbf, 0xa9, 0xfc, 0xe4, 0x58, 0x3a, 0xed, + 0x0a, 0x2b, 0xd9, 0xdf, 0x2d, 0x21, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0x3e, 0x7a, 0x13, 0x46, 0xd8, + 0x2f, 0x65, 0xf6, 0x58, 0xcc, 0x97, 0xc0, 0x54, 0x74, 0x44, 0x6e, 0x13, 0x6c, 0x14, 0x61, 0x93, + 0xd4, 0xf4, 0x06, 0x0c, 0xaa, 0xcf, 0x7a, 0xa0, 0xda, 0xfa, 0xff, 0x58, 0x84, 0xc9, 0x8c, 0x35, + 0x87, 0x42, 0x63, 0x26, 0x2e, 0x74, 0xb9, 0x54, 0xdf, 0xe1, 0x5c, 0x84, 0xec, 0x01, 0x58, 0x13, + 0x6b, 0xab, 0xeb, 0x46, 0xaf, 0x87, 0x24, 0xd9, 0x28, 0x2d, 0xea, 0xdc, 0x28, 0x6d, 0xec, 0xd0, + 0x86, 0x9a, 0x36, 0xa4, 0x7a, 0xfa, 0x40, 0xe7, 0xf4, 0x37, 0x7b, 0xe0, 0x48, 0x56, 0x0c, 0x69, + 0xf4, 0x19, 0x10, 0xe9, 0xff, 0xc5, 0xb4, 0x3e, 0xdf, 0x6e, 
0x84, 0xf5, 0x9a, 0x33, 0xcc, 0x17, + 0x4c, 0x84, 0x6e, 0x9d, 0x91, 0xc7, 0x11, 0x2f, 0xec, 0x38, 0xcc, 0xa2, 0x4d, 0x16, 0x52, 0x49, + 0xdc, 0x9e, 0xf2, 0xf8, 0xf8, 0x50, 0xd7, 0x1d, 0x10, 0xf7, 0x6f, 0x98, 0x30, 0xa9, 0x92, 0xc5, + 0x9d, 0x4d, 0xaa, 0x64, 0xcb, 0x68, 0x19, 0xfa, 0xaa, 0xdc, 0x56, 0xa7, 0xd8, 0xf9, 0x08, 0xe3, + 0x86, 0x3a, 0xea, 0x00, 0x16, 0x06, 0x3a, 0x82, 0xc0, 0xb4, 0x0b, 0x43, 0xda, 0xc0, 0x3c, 0xd0, + 0xc5, 0xb3, 0x45, 0x2f, 0x3e, 0x6d, 0x08, 0x1e, 0xe8, 0x02, 0xfa, 0x31, 0xed, 0xee, 0x17, 0xe7, + 0xc1, 0x07, 0x0d, 0xde, 0xe9, 0x64, 0xc2, 0x05, 0x2f, 0xb1, 0xaf, 0x18, 0x2f, 0x55, 0x31, 0x63, + 0x9e, 0xe7, 0xa6, 0x4e, 0x32, 0x2f, 0xfc, 0xf6, 0x71, 0xce, 0xed, 0x1f, 0xb7, 0x20, 0xe1, 0x24, + 0xa5, 0xc4, 0x9d, 0x56, 0xae, 0xb8, 0xf3, 0x0c, 0xf4, 0x04, 0x7e, 0x9d, 0x24, 0x53, 0xd3, 0x63, + 0xbf, 0x4e, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x16, 0x62, 0x0d, 0xeb, 0x0f, 0x74, 0xf1, 0xf4, 0x7e, + 0x04, 0x7a, 0xeb, 0x64, 0x9b, 0xd4, 0x93, 0x19, 0x44, 0xaf, 0xd2, 0x42, 0xcc, 0x61, 0xf6, 0x2f, + 0xf7, 0xc0, 0xa9, 0xb6, 0x91, 0xd7, 0x28, 0x83, 0xb9, 0xe1, 0x44, 0xe4, 0xb6, 0xb3, 0x93, 0xcc, + 0x9c, 0x77, 0x89, 0x17, 0x63, 0x09, 0x67, 0xce, 0xc8, 0x3c, 0x93, 0x4c, 0x42, 0x38, 0x2c, 0x12, + 0xc8, 0x08, 0xa8, 0x29, 0x6c, 0x2c, 0xde, 0x0f, 0x61, 0xe3, 0xb3, 0x00, 0x61, 0x58, 0xe7, 0x06, + 0x97, 0x35, 0xe1, 0xe5, 0x1c, 0x67, 0x1c, 0xaa, 0x5c, 0x15, 0x10, 0xac, 0x61, 0xa1, 0x05, 0x18, + 0x6f, 0x06, 0x7e, 0xc4, 0x65, 0xed, 0x0b, 0xdc, 0x26, 0xb9, 0xd7, 0x0c, 0x7a, 0x55, 0x4e, 0xc0, + 0x71, 0xaa, 0x06, 0x7a, 0x01, 0x86, 0x44, 0x20, 0xac, 0xb2, 0xef, 0xd7, 0x85, 0x78, 0x4f, 0x99, + 0xe9, 0x56, 0x62, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x4c, 0x80, 0xdf, 0x9f, 0x59, 0x8d, 0x0b, 0xf1, + 0x35, 0xbc, 0x44, 0xd0, 0xfc, 0x81, 0xae, 0x82, 0xe6, 0xc7, 0x02, 0xcf, 0xc1, 0xae, 0xf5, 0xc9, + 0xd0, 0x51, 0x44, 0xf8, 0x95, 0x1e, 0x98, 0x14, 0x0b, 0xe7, 0x41, 0x2f, 0x97, 0xeb, 0xe9, 0xe5, + 0x72, 0x3f, 0x44, 0xa2, 0xef, 0xaf, 0x99, 0xc3, 0x5e, 0x33, 0x3f, 0x6c, 0x81, 0xc9, 0x43, 0xa2, + 0xff, 0x2f, 0x37, 0xf5, 0xe8, 0x0b, 0xb9, 0x3c, 0x69, 0x1c, 0x51, 0xfb, 0x9d, 0x25, 0x21, 0xb5, + 0xff, 0x93, 0x05, 0x0f, 0x77, 0xa4, 0x88, 0x16, 0x61, 0x90, 0x31, 0xba, 0xda, 0xbb, 0xf8, 0x71, + 0xe5, 0xb3, 0x20, 0x01, 0x39, 0x7c, 0x77, 0x5c, 0x13, 0x2d, 0xa6, 0x72, 0xbc, 0x3e, 0x91, 0x91, + 0xe3, 0xf5, 0xa8, 0x31, 0x3c, 0xf7, 0x98, 0xe4, 0xf5, 0x0b, 0xf4, 0xc6, 0x31, 0x7d, 0x12, 0x3f, + 0x64, 0x88, 0x73, 0xed, 0x84, 0x38, 0x17, 0x99, 0xd8, 0xda, 0x1d, 0xf2, 0x1a, 0x8c, 0xb3, 0x08, + 0x99, 0xcc, 0x83, 0x46, 0x38, 0x33, 0x16, 0x62, 0x2b, 0xf9, 0xab, 0x09, 0x18, 0x4e, 0x61, 0xdb, + 0x7f, 0x5a, 0x84, 0x3e, 0xbe, 0xfd, 0x0e, 0xe1, 0xe1, 0xfb, 0x14, 0x0c, 0xba, 0x8d, 0x46, 0x8b, + 0xa7, 0xed, 0xec, 0x8d, 0x6d, 0xae, 0x97, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x49, 0x68, 0x12, 0xda, + 0x04, 0xe1, 0xe6, 0x1d, 0x9f, 0x59, 0x70, 0x22, 0x87, 0x73, 0x71, 0xea, 0x9e, 0x8d, 0x75, 0x0e, + 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde, 0x06, 0x2d, 0x13, 0x99, 0x1a, 0x9e, 0x6c, 0x43, 0xad, + 0xa2, 0x90, 0x39, 0xcd, 0xf8, 0xcc, 0x51, 0x00, 0xac, 0x51, 0x44, 0x33, 0xc6, 0x4d, 0x3f, 0x9d, + 0x98, 0x3b, 0xe0, 0x54, 0xe3, 0x39, 0x9b, 0x7e, 0x11, 0x06, 0x15, 0xf1, 0x4e, 0x72, 0xc5, 0x61, + 0x9d, 0x61, 0xfb, 0x08, 0x8c, 0x25, 0xfa, 0x76, 0x20, 0xb1, 0xe4, 0xaf, 0x58, 0x30, 0xc6, 0x3b, + 0xb3, 0xe8, 0x6d, 0x8b, 0xdb, 0xe0, 0x2e, 0x1c, 0xa9, 0x67, 0x9c, 0xca, 0x62, 0xfa, 0xbb, 0x3f, + 0xc5, 0x95, 0x18, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0xe7, 0xe8, 0x8e, 0xa3, 0xa7, 0xae, 0x53, + 0x17, 0xd1, 0x36, 0x86, 0xf9, 0x6e, 0xe3, 0x65, 0x58, 0x41, 0xed, 0x3f, 0xb4, 0x60, 
0x82, 0xf7, + 0xfc, 0x0a, 0xd9, 0x51, 0x67, 0xd3, 0xb7, 0xb2, 0xef, 0x22, 0x61, 0x74, 0x21, 0x27, 0x61, 0xb4, + 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, 0x65, 0x0b, 0xc4, 0x0a, 0x39, 0x04, 0x49, 0xcb, 0x77, 0x98, + 0x92, 0x96, 0xe9, 0xfc, 0x4d, 0x90, 0x23, 0x62, 0xf9, 0x6b, 0x0b, 0xc6, 0x39, 0x42, 0x6c, 0x05, + 0xf1, 0x2d, 0x9d, 0x87, 0x39, 0xf3, 0x8b, 0x32, 0xcd, 0x5a, 0xaf, 0x90, 0x9d, 0x35, 0xbf, 0xec, + 0x44, 0x9b, 0xd9, 0x1f, 0x65, 0x4c, 0x56, 0x4f, 0xdb, 0xc9, 0xaa, 0xc9, 0x0d, 0x64, 0x24, 0x26, + 0xec, 0x20, 0x00, 0x3e, 0x68, 0x62, 0x42, 0xfb, 0xcf, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0x71, 0xa3, + 0xec, 0x10, 0x2b, 0xd5, 0x2e, 0xba, 0xf8, 0x68, 0x52, 0x10, 0xac, 0x61, 0xdd, 0x97, 0xe1, 0x49, + 0x98, 0xb2, 0x14, 0x3b, 0x9b, 0xb2, 0x1c, 0x60, 0x44, 0xbf, 0xdc, 0x0f, 0x49, 0x9f, 0x49, 0x74, + 0x03, 0x86, 0xab, 0x4e, 0xd3, 0xb9, 0xe5, 0xd6, 0xdd, 0xc8, 0x25, 0x61, 0x3b, 0x3b, 0xb7, 0x79, + 0x0d, 0x4f, 0x18, 0x1f, 0x68, 0x25, 0xd8, 0xa0, 0x83, 0x66, 0x00, 0x9a, 0x81, 0xbb, 0xed, 0xd6, + 0xc9, 0x06, 0x13, 0x08, 0xb1, 0xf8, 0x3e, 0xdc, 0xe8, 0x4e, 0x96, 0x62, 0x0d, 0x23, 0x23, 0xf4, + 0x46, 0xf1, 0x01, 0x87, 0xde, 0x80, 0x43, 0x0b, 0xbd, 0xd1, 0x73, 0xa0, 0xd0, 0x1b, 0x03, 0x07, + 0x0e, 0xbd, 0xd1, 0xdb, 0x55, 0xe8, 0x0d, 0x0c, 0xc7, 0x24, 0xef, 0x49, 0xff, 0x2f, 0xb9, 0x75, + 0x22, 0x1e, 0x1c, 0x3c, 0x28, 0xd1, 0xf4, 0xde, 0x6e, 0xe9, 0x18, 0xce, 0xc4, 0xc0, 0x39, 0x35, + 0xd1, 0x47, 0x61, 0xca, 0xa9, 0xd7, 0xfd, 0xdb, 0x6a, 0x52, 0x17, 0xc3, 0xaa, 0x53, 0xe7, 0xca, + 0xa5, 0x7e, 0x46, 0xf5, 0xe4, 0xde, 0x6e, 0x69, 0x6a, 0x36, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0xaf, + 0xc0, 0x60, 0x33, 0xf0, 0xab, 0x2b, 0x9a, 0x63, 0xf7, 0x69, 0x3a, 0x80, 0x65, 0x59, 0xb8, 0xbf, + 0x5b, 0x1a, 0x51, 0x7f, 0xd8, 0x85, 0x1f, 0x57, 0xc8, 0x88, 0x6a, 0x31, 0xf4, 0xa0, 0xa3, 0x5a, + 0x0c, 0xdf, 0xef, 0xa8, 0x16, 0x5b, 0x30, 0x59, 0x21, 0x81, 0xeb, 0xd4, 0xdd, 0xbb, 0x94, 0x27, + 0x97, 0x67, 0xe0, 0x1a, 0x0c, 0x06, 0x89, 0x53, 0xbf, 0xab, 0xe0, 0xdb, 0x9a, 0x5c, 0x46, 0x9e, + 0xf2, 0x31, 0x21, 0xfb, 0x7f, 0x5b, 0xd0, 0x2f, 0xfc, 0x30, 0x0f, 0x81, 0x33, 0x9d, 0x35, 0x54, + 0x32, 0xa5, 0xec, 0x49, 0x61, 0x9d, 0xc9, 0x55, 0xc6, 0x2c, 0x27, 0x94, 0x31, 0x0f, 0xb7, 0x23, + 0xd2, 0x5e, 0x0d, 0xf3, 0x77, 0x8b, 0xf4, 0x85, 0x60, 0x44, 0x04, 0x78, 0xf0, 0x43, 0xb0, 0x0a, + 0xfd, 0xa1, 0xf0, 0x48, 0x2f, 0xe4, 0xfb, 0xf2, 0x24, 0x27, 0x31, 0xb6, 0x81, 0x14, 0x3e, 0xe8, + 0x92, 0x48, 0xa6, 0xab, 0x7b, 0xf1, 0x01, 0xba, 0xba, 0x77, 0x8a, 0x99, 0xd0, 0x73, 0x3f, 0x62, + 0x26, 0xd8, 0x5f, 0x63, 0xb7, 0xb3, 0x5e, 0x7e, 0x08, 0x8c, 0xdb, 0x25, 0xf3, 0x1e, 0xb7, 0xdb, + 0xac, 0x2c, 0xd1, 0xa9, 0x1c, 0x06, 0xee, 0x97, 0x2c, 0x38, 0x95, 0xf1, 0x55, 0x1a, 0x37, 0xf7, + 0x34, 0x0c, 0x38, 0xad, 0x9a, 0xab, 0xf6, 0xb2, 0xa6, 0x2d, 0x9e, 0x15, 0xe5, 0x58, 0x61, 0xa0, + 0x79, 0x98, 0x20, 0x77, 0x9a, 0x2e, 0x57, 0xc3, 0xeb, 0xa6, 0xe3, 0x45, 0xee, 0xbc, 0xbb, 0x98, + 0x04, 0xe2, 0x34, 0xbe, 0x0a, 0x89, 0x56, 0xcc, 0x0d, 0x89, 0xf6, 0x0b, 0x16, 0x0c, 0x29, 0x9f, + 0xec, 0x07, 0x3e, 0xda, 0xaf, 0x99, 0xa3, 0xfd, 0x50, 0x9b, 0xd1, 0xce, 0x19, 0xe6, 0x3f, 0x28, + 0xa8, 0xfe, 0x96, 0xfd, 0x20, 0xea, 0x82, 0x4b, 0xbc, 0x77, 0xb7, 0x97, 0x0b, 0x30, 0xe4, 0x34, + 0x9b, 0x12, 0x20, 0xed, 0x17, 0x59, 0x2a, 0x85, 0xb8, 0x18, 0xeb, 0x38, 0xca, 0x0b, 0xa7, 0x98, + 0xeb, 0x85, 0x53, 0x03, 0x88, 0x9c, 0x60, 0x83, 0x44, 0xb4, 0x4c, 0x98, 0x5b, 0xe7, 0x9f, 0x37, + 0xad, 0xc8, 0xad, 0xcf, 0xb8, 0x5e, 0x14, 0x46, 0xc1, 0xcc, 0xb2, 0x17, 0x5d, 0x0b, 0xf8, 0x33, + 0x55, 0x0b, 0x2a, 0xa8, 0x68, 0x61, 0x8d, 0xae, 0x8c, 0x3f, 0xc2, 0xda, 0xe8, 0x35, 0x0d, 0x61, + 0x56, 0x45, 
0x39, 0x56, 0x18, 0xf6, 0x8b, 0xec, 0xf6, 0x61, 0x63, 0x7a, 0xb0, 0x80, 0x7a, 0x7f, + 0x3e, 0xac, 0x66, 0x83, 0xa9, 0x84, 0x17, 0xf4, 0xb0, 0x7d, 0xed, 0x0f, 0x7b, 0xda, 0xb0, 0xee, + 0xcf, 0x1a, 0xc7, 0xf6, 0x43, 0x1f, 0x4f, 0x19, 0x37, 0x3d, 0xd3, 0xe1, 0xd6, 0x38, 0x80, 0x39, + 0x13, 0xcb, 0xab, 0xc6, 0xb2, 0x4e, 0x2d, 0x97, 0xc5, 0xbe, 0xd0, 0xf2, 0xaa, 0x09, 0x00, 0x8e, + 0x71, 0x28, 0xc3, 0xa6, 0xfe, 0x84, 0x53, 0x28, 0x0e, 0xbf, 0xad, 0xb0, 0x43, 0xac, 0x61, 0xa0, + 0xf3, 0x42, 0x68, 0xc1, 0x75, 0x0f, 0x0f, 0x25, 0x84, 0x16, 0x72, 0xb8, 0x34, 0x49, 0xd3, 0x05, + 0x18, 0x22, 0x77, 0x22, 0x12, 0x78, 0x4e, 0x9d, 0xb6, 0xd0, 0x1b, 0x47, 0x8c, 0x5d, 0x8c, 0x8b, + 0xb1, 0x8e, 0x83, 0xd6, 0x60, 0x2c, 0xe4, 0xb2, 0x3c, 0x95, 0xf4, 0x81, 0xcb, 0x44, 0x9f, 0x54, + 0xde, 0xf0, 0x26, 0x78, 0x9f, 0x15, 0xf1, 0xd3, 0x49, 0xc6, 0x08, 0x49, 0x92, 0x40, 0xaf, 0xc2, + 0x68, 0xdd, 0x77, 0x6a, 0x73, 0x4e, 0xdd, 0xf1, 0xaa, 0x6c, 0x7c, 0x06, 0xcc, 0xec, 0xfc, 0x57, + 0x0d, 0x28, 0x4e, 0x60, 0x53, 0x06, 0x51, 0x2f, 0x11, 0x89, 0x4a, 0x1c, 0x6f, 0x83, 0x84, 0x53, + 0x83, 0xec, 0xab, 0x18, 0x83, 0x78, 0x35, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0x17, 0x61, 0x58, 0x7e, + 0xbe, 0x16, 0x52, 0x27, 0x76, 0x68, 0xd2, 0x60, 0xd8, 0xc0, 0x44, 0x21, 0x1c, 0x95, 0xff, 0xd7, + 0x02, 0x67, 0x7d, 0xdd, 0xad, 0x8a, 0x38, 0x13, 0xdc, 0xf9, 0xfb, 0x23, 0xd2, 0xd3, 0x74, 0x31, + 0x0b, 0x69, 0x7f, 0xb7, 0x74, 0x52, 0x8c, 0x5a, 0x26, 0x1c, 0x67, 0xd3, 0x46, 0x2b, 0x30, 0xc9, + 0x6d, 0x60, 0xe6, 0x37, 0x49, 0x75, 0x4b, 0x6e, 0x38, 0xc6, 0x35, 0x6a, 0x8e, 0x3f, 0x97, 0xd3, + 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x0b, 0xa6, 0x9a, 0xad, 0x5b, 0x75, 0x37, 0xdc, 0x5c, 0xf5, 0x23, + 0x66, 0x42, 0x36, 0x5b, 0xab, 0x05, 0x24, 0xe4, 0xbe, 0xc1, 0xec, 0xea, 0x95, 0x61, 0x90, 0xca, + 0x39, 0x78, 0x38, 0x97, 0x02, 0xba, 0x0b, 0x47, 0x13, 0x0b, 0x41, 0xc4, 0x33, 0x19, 0xcd, 0x4f, + 0xf9, 0x54, 0xc9, 0xaa, 0x20, 0x42, 0x03, 0x65, 0x81, 0x70, 0x76, 0x13, 0xe8, 0x25, 0x00, 0xb7, + 0xb9, 0xe4, 0x34, 0xdc, 0x3a, 0x7d, 0x8e, 0x4e, 0xb2, 0x35, 0x42, 0x9f, 0x26, 0xb0, 0x5c, 0x96, + 0xa5, 0xf4, 0x6c, 0x16, 0xff, 0x76, 0xb0, 0x86, 0x8d, 0xae, 0xc2, 0xa8, 0xf8, 0xb7, 0x23, 0xa6, + 0x74, 0x42, 0x65, 0x07, 0x1d, 0x95, 0x35, 0xd4, 0x3c, 0x26, 0x4a, 0x70, 0xa2, 0x2e, 0xda, 0x80, + 0x53, 0x32, 0x35, 0xa9, 0xbe, 0x3e, 0xe5, 0x1c, 0x84, 0x2c, 0xcf, 0xd2, 0x00, 0xf7, 0x29, 0x9a, + 0x6d, 0x87, 0x88, 0xdb, 0xd3, 0xa1, 0xf7, 0xba, 0xbe, 0xcc, 0xb9, 0xc7, 0xf8, 0xd1, 0x38, 0x12, + 0xe6, 0xd5, 0x24, 0x10, 0xa7, 0xf1, 0x91, 0x0f, 0x47, 0x5d, 0x2f, 0x6b, 0x55, 0x1f, 0x63, 0x84, + 0x3e, 0xcc, 0x9d, 0xe5, 0xdb, 0xaf, 0xe8, 0x4c, 0x38, 0xce, 0xa6, 0x8b, 0x96, 0x61, 0x32, 0xe2, + 0x05, 0x0b, 0x6e, 0xc8, 0xd3, 0xb8, 0xd0, 0x67, 0xdf, 0x71, 0xd6, 0xdc, 0x71, 0xba, 0x9a, 0xd7, + 0xd2, 0x60, 0x9c, 0x55, 0xe7, 0x9d, 0x19, 0x80, 0x7e, 0xdd, 0xa2, 0xb5, 0x35, 0x46, 0x1f, 0x7d, + 0x0a, 0x86, 0xf5, 0xf1, 0x11, 0x4c, 0xcb, 0xd9, 0x6c, 0x3e, 0x58, 0x3b, 0x5e, 0xf8, 0x33, 0x41, + 0x1d, 0x21, 0x3a, 0x0c, 0x1b, 0x14, 0x51, 0x35, 0x23, 0xc8, 0xc5, 0xf9, 0xee, 0x98, 0xa2, 0xee, + 0xed, 0x1f, 0x09, 0x64, 0xef, 0x1c, 0x74, 0x15, 0x06, 0xaa, 0x75, 0x97, 0x78, 0xd1, 0x72, 0xb9, + 0x5d, 0x80, 0xd2, 0x79, 0x81, 0x23, 0xb6, 0xa2, 0xc8, 0xbe, 0xc4, 0xcb, 0xb0, 0xa2, 0x60, 0x5f, + 0x84, 0xa1, 0x4a, 0x9d, 0x90, 0x26, 0xf7, 0xe3, 0x42, 0x4f, 0xb0, 0x87, 0x09, 0x63, 0x2d, 0x2d, + 0xc6, 0x5a, 0xea, 0x6f, 0x0e, 0xc6, 0x54, 0x4a, 0xb8, 0xfd, 0xdb, 0x05, 0x28, 0x75, 0x48, 0x02, + 0x96, 0xd0, 0xb7, 0x59, 0x5d, 0xe9, 0xdb, 0x66, 0x61, 0x2c, 0xfe, 0xa7, 0x8b, 0xf2, 0x94, 0x31, + 0xf4, 0x0d, 0x13, 0x8c, 0x93, 0xf8, 
0x5d, 0xfb, 0xb5, 0xe8, 0x2a, 0xbb, 0x9e, 0x8e, 0x9e, 0x59, + 0x86, 0xaa, 0xbe, 0xb7, 0xfb, 0xb7, 0x77, 0xae, 0xda, 0xd5, 0xfe, 0x5a, 0x01, 0x8e, 0xaa, 0x21, + 0x7c, 0xef, 0x0e, 0xdc, 0xf5, 0xf4, 0xc0, 0xdd, 0x07, 0xa5, 0xb5, 0x7d, 0x0d, 0xfa, 0x78, 0xd4, + 0xd4, 0x2e, 0x78, 0xfe, 0x47, 0xcc, 0xe0, 0xf4, 0x8a, 0xcd, 0x34, 0x02, 0xd4, 0xff, 0x80, 0x05, + 0x63, 0x09, 0x07, 0x49, 0x84, 0x35, 0x2f, 0xfa, 0x7b, 0xe1, 0xcb, 0xb3, 0x38, 0xfe, 0x33, 0xd0, + 0xb3, 0xe9, 0x2b, 0x23, 0x65, 0x85, 0x71, 0xd9, 0x0f, 0x23, 0xcc, 0x20, 0xf6, 0x1f, 0x59, 0xd0, + 0xbb, 0xe6, 0xb8, 0x5e, 0x24, 0xb5, 0x1f, 0x56, 0x8e, 0xf6, 0xa3, 0x9b, 0xef, 0x42, 0x2f, 0x40, + 0x1f, 0x59, 0x5f, 0x27, 0xd5, 0x48, 0xcc, 0xaa, 0x8c, 0xa6, 0xd1, 0xb7, 0xc8, 0x4a, 0x29, 0x13, + 0xca, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x37, 0x61, 0x30, 0x72, 0x1b, 0x64, 0xb6, 0x56, 0x13, + 0x36, 0x01, 0xf7, 0x10, 0x02, 0x66, 0x4d, 0x12, 0xc0, 0x31, 0x2d, 0xfb, 0x8b, 0x05, 0x80, 0x38, + 0x14, 0x5c, 0xa7, 0x4f, 0x9c, 0x4b, 0x69, 0x8b, 0xcf, 0x66, 0x68, 0x8b, 0x51, 0x4c, 0x30, 0x43, + 0x55, 0xac, 0x86, 0xa9, 0xd8, 0xd5, 0x30, 0xf5, 0x1c, 0x64, 0x98, 0xe6, 0x61, 0x22, 0x0e, 0x65, + 0x67, 0x46, 0xf2, 0x64, 0xf7, 0xf7, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0x36, 0x81, 0x33, 0x2a, 0xa2, + 0x97, 0xb8, 0x0b, 0x99, 0x2b, 0x81, 0xae, 0x7d, 0xef, 0x30, 0x4e, 0xb1, 0x3a, 0xbc, 0x90, 0xab, + 0x0e, 0xff, 0x29, 0x0b, 0x8e, 0x24, 0xdb, 0x61, 0x7e, 0xf7, 0x9f, 0xb7, 0xe0, 0x68, 0x9c, 0x03, + 0x27, 0x6d, 0x82, 0xf0, 0x7c, 0xdb, 0x28, 0x65, 0x39, 0x3d, 0x8e, 0xc3, 0xb6, 0xac, 0x64, 0x91, + 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xab, 0x07, 0xa6, 0xf2, 0xc2, 0x9b, 0x31, 0x4f, 0x23, 0xe7, 0x4e, + 0x65, 0x8b, 0xdc, 0x16, 0xfe, 0x1c, 0xb1, 0xa7, 0x11, 0x2f, 0xc6, 0x12, 0x9e, 0x4c, 0x7b, 0x54, + 0xe8, 0x32, 0xed, 0xd1, 0x26, 0x4c, 0xdc, 0xde, 0x24, 0xde, 0x75, 0x2f, 0x74, 0x22, 0x37, 0x5c, + 0x77, 0x99, 0x02, 0x9d, 0xaf, 0x1b, 0x99, 0xba, 0x7f, 0xe2, 0x66, 0x12, 0x61, 0x7f, 0xb7, 0x74, + 0xca, 0x28, 0x88, 0xbb, 0xcc, 0x0f, 0x12, 0x9c, 0x26, 0x9a, 0xce, 0x1a, 0xd5, 0xf3, 0x80, 0xb3, + 0x46, 0x35, 0x5c, 0x61, 0x76, 0x23, 0xdd, 0x48, 0xd8, 0xb3, 0x75, 0x45, 0x95, 0x62, 0x0d, 0x03, + 0x7d, 0x02, 0x90, 0x9e, 0xf6, 0xcf, 0x88, 0x2e, 0xfb, 0xcc, 0xde, 0x6e, 0x09, 0xad, 0xa6, 0xa0, + 0xfb, 0xbb, 0xa5, 0x49, 0x5a, 0xba, 0xec, 0xd1, 0xe7, 0x6f, 0x1c, 0x92, 0x2f, 0x83, 0x10, 0xba, + 0x09, 0xe3, 0xb4, 0x94, 0xed, 0x28, 0x19, 0xba, 0x96, 0x3f, 0x59, 0x9f, 0xda, 0xdb, 0x2d, 0x8d, + 0xaf, 0x26, 0x60, 0x79, 0xa4, 0x53, 0x44, 0x32, 0x92, 0x47, 0x0d, 0x74, 0x9b, 0x3c, 0xca, 0xfe, + 0xbc, 0x05, 0x27, 0xe8, 0x05, 0x57, 0xbb, 0x9a, 0xa3, 0x45, 0x77, 0x9a, 0x2e, 0xd7, 0xd3, 0x88, + 0xab, 0x86, 0xc9, 0xea, 0xca, 0xcb, 0x5c, 0x4b, 0xa3, 0xa0, 0xf4, 0x84, 0xdf, 0x72, 0xbd, 0x5a, + 0xf2, 0x84, 0xbf, 0xe2, 0x7a, 0x35, 0xcc, 0x20, 0xea, 0xca, 0x2a, 0xe6, 0x46, 0xa9, 0xff, 0x0a, + 0xdd, 0xab, 0xb4, 0x2f, 0xdf, 0xd2, 0x6e, 0xa0, 0xa7, 0x74, 0x9d, 0xaa, 0x30, 0x9f, 0xcc, 0xd5, + 0xa7, 0x7e, 0xce, 0x02, 0xe1, 0xfd, 0xde, 0xc5, 0x9d, 0xfc, 0x26, 0x0c, 0x6f, 0xa7, 0x53, 0xa2, + 0x9e, 0xc9, 0x0f, 0x07, 0x20, 0x12, 0xa1, 0x2a, 0x16, 0xdd, 0x48, 0x7f, 0x6a, 0xd0, 0xb2, 0x6b, + 0x20, 0xa0, 0x0b, 0x84, 0x69, 0x35, 0x3a, 0xf7, 0xe6, 0x59, 0x80, 0x1a, 0xc3, 0x65, 0x79, 0xd2, + 0x0b, 0x26, 0xc7, 0xb5, 0xa0, 0x20, 0x58, 0xc3, 0xb2, 0x7f, 0xae, 0x08, 0x43, 0x32, 0x05, 0x67, + 0xcb, 0xeb, 0x46, 0xf6, 0x78, 0xa0, 0x9c, 0xfc, 0xe8, 0x2d, 0x98, 0x08, 0x48, 0xb5, 0x15, 0x84, + 0xee, 0x36, 0x91, 0x60, 0xb1, 0x49, 0x66, 0x78, 0x92, 0x84, 0x04, 0x70, 0x9f, 0x85, 0xc8, 0x4a, + 0x14, 0x32, 0xa5, 0x71, 0x9a, 0x10, 0x3a, 0x0f, 0x83, 0x4c, 
0xf4, 0x5e, 0x8e, 0x05, 0xc2, 0x4a, + 0xf0, 0xb5, 0x22, 0x01, 0x38, 0xc6, 0x61, 0x8f, 0x83, 0xd6, 0x2d, 0x86, 0x9e, 0xf0, 0x04, 0xaf, + 0xf0, 0x62, 0x2c, 0xe1, 0xe8, 0xa3, 0x30, 0xce, 0xeb, 0x05, 0x7e, 0xd3, 0xd9, 0xe0, 0x2a, 0xc1, + 0x5e, 0x15, 0x5e, 0x67, 0x7c, 0x25, 0x01, 0xdb, 0xdf, 0x2d, 0x1d, 0x49, 0x96, 0xb1, 0x6e, 0xa7, + 0xa8, 0x30, 0xcb, 0x3f, 0xde, 0x08, 0xbd, 0x33, 0x52, 0x06, 0x83, 0x31, 0x08, 0xeb, 0x78, 0xf6, + 0x5f, 0x59, 0x30, 0xa1, 0x4d, 0x55, 0xd7, 0x79, 0x2a, 0x8c, 0x41, 0x2a, 0x74, 0x31, 0x48, 0x07, + 0x8b, 0xf6, 0x90, 0x39, 0xc3, 0x3d, 0xf7, 0x69, 0x86, 0xed, 0x4f, 0x01, 0x4a, 0xe7, 0x77, 0x45, + 0xaf, 0x73, 0x43, 0x7e, 0x37, 0x20, 0xb5, 0x76, 0x0a, 0x7f, 0x3d, 0x72, 0x8e, 0xf4, 0x5c, 0xe5, + 0xb5, 0xb0, 0xaa, 0x6f, 0xff, 0x60, 0x0f, 0x8c, 0x27, 0x63, 0x75, 0xa0, 0xcb, 0xd0, 0xc7, 0xb9, + 0x74, 0x41, 0xbe, 0x8d, 0x3d, 0x99, 0x16, 0xe1, 0x83, 0xf1, 0x2b, 0x82, 0xd1, 0x17, 0xf5, 0xd1, + 0x5b, 0x30, 0x54, 0xf3, 0x6f, 0x7b, 0xb7, 0x9d, 0xa0, 0x36, 0x5b, 0x5e, 0x16, 0x27, 0x44, 0xa6, + 0x00, 0x6a, 0x21, 0x46, 0xd3, 0xa3, 0x86, 0x30, 0xdb, 0x89, 0x18, 0x84, 0x75, 0x72, 0x68, 0x8d, + 0xa5, 0x2c, 0x5a, 0x77, 0x37, 0x56, 0x9c, 0x66, 0x3b, 0xaf, 0xae, 0x79, 0x89, 0xa4, 0x51, 0x1e, + 0x11, 0x79, 0x8d, 0x38, 0x00, 0xc7, 0x84, 0xd0, 0x67, 0x60, 0x32, 0xcc, 0x51, 0x89, 0xe5, 0xa5, + 0xfb, 0x6e, 0xa7, 0x25, 0xe2, 0xc2, 0x94, 0x2c, 0xe5, 0x59, 0x56, 0x33, 0xe8, 0x0e, 0x20, 0x21, + 0x7a, 0x5e, 0x0b, 0x5a, 0x61, 0x34, 0xd7, 0xf2, 0x6a, 0x75, 0x99, 0xd2, 0xe8, 0x83, 0xd9, 0x72, + 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x8b, 0xdd, 0x9b, 0xc6, 0xc0, 0x19, 0x6d, 0xd8, 0x9f, 0xeb, 0x81, + 0x69, 0x99, 0x50, 0x39, 0xc3, 0x7b, 0xe5, 0xb3, 0x56, 0xc2, 0x7d, 0xe5, 0xa5, 0xfc, 0x83, 0xfe, + 0x81, 0x39, 0xb1, 0x7c, 0x21, 0xed, 0xc4, 0xf2, 0xca, 0x01, 0xbb, 0x71, 0xdf, 0x5c, 0x59, 0xde, + 0xb3, 0xfe, 0x27, 0x7b, 0x47, 0xc0, 0xb8, 0x9a, 0x11, 0xe6, 0x81, 0xd1, 0xcb, 0x52, 0x75, 0x94, + 0xf3, 0xfc, 0xbf, 0x2c, 0x70, 0x8c, 0xcb, 0x7e, 0x58, 0x86, 0x4f, 0x67, 0xe7, 0xac, 0xa2, 0x43, + 0x69, 0x92, 0x46, 0x33, 0xda, 0x59, 0x70, 0x03, 0xd1, 0xe3, 0x4c, 0x9a, 0x8b, 0x02, 0x27, 0x4d, + 0x53, 0x42, 0xb0, 0xa2, 0x83, 0xb6, 0x61, 0x62, 0x83, 0x45, 0x7c, 0xd2, 0x72, 0x1b, 0x8b, 0x73, + 0x21, 0x73, 0xdf, 0x5e, 0x9a, 0x5f, 0xcc, 0x4f, 0x84, 0xcc, 0x1f, 0x7f, 0x29, 0x14, 0x9c, 0x6e, + 0x82, 0x6e, 0x8d, 0x23, 0xce, 0xed, 0x70, 0xb1, 0xee, 0x84, 0x91, 0x5b, 0x9d, 0xab, 0xfb, 0xd5, + 0xad, 0x4a, 0xe4, 0x07, 0x32, 0x01, 0x62, 0xe6, 0xdb, 0x6b, 0xf6, 0x66, 0x25, 0x85, 0x6f, 0x34, + 0x3f, 0xb5, 0xb7, 0x5b, 0x3a, 0x92, 0x85, 0x85, 0x33, 0xdb, 0x42, 0xab, 0xd0, 0xbf, 0xe1, 0x46, + 0x98, 0x34, 0x7d, 0x71, 0x5a, 0x64, 0x1e, 0x85, 0x97, 0x38, 0x8a, 0xd1, 0x12, 0x8b, 0x48, 0x25, + 0x00, 0x58, 0x12, 0x41, 0xaf, 0xab, 0x4b, 0xa0, 0x2f, 0x5f, 0x00, 0x9b, 0xb6, 0xbd, 0xcb, 0xbc, + 0x06, 0x5e, 0x85, 0xa2, 0xb7, 0x1e, 0xb6, 0x8b, 0xc5, 0xb3, 0xba, 0x64, 0xc8, 0xcf, 0xe6, 0xfa, + 0xe9, 0xd3, 0x78, 0x75, 0xa9, 0x82, 0x69, 0x45, 0xe6, 0xf6, 0x1a, 0x56, 0x43, 0x57, 0x24, 0x5c, + 0xca, 0xf4, 0x02, 0x5e, 0xae, 0xcc, 0x57, 0x96, 0x0d, 0x1a, 0x2c, 0xaa, 0x21, 0x2b, 0xc6, 0xbc, + 0x3a, 0xba, 0x01, 0x83, 0x1b, 0xfc, 0xe0, 0x5b, 0x0f, 0x45, 0x52, 0xf5, 0xcc, 0xcb, 0xe8, 0x92, + 0x44, 0x32, 0xe8, 0xb1, 0x2b, 0x43, 0x81, 0x70, 0x4c, 0x0a, 0x7d, 0xce, 0x82, 0xa3, 0xc9, 0xac, + 0xf4, 0xcc, 0x59, 0x4d, 0x98, 0xa9, 0x65, 0x3a, 0x00, 0x94, 0xb3, 0x2a, 0x18, 0x0d, 0x32, 0xf5, + 0x4b, 0x26, 0x1a, 0xce, 0x6e, 0x8e, 0x0e, 0x74, 0x70, 0xab, 0xd6, 0x2e, 0x47, 0x4f, 0x22, 0x30, + 0x11, 0x1f, 0x68, 0x3c, 0xb7, 0x80, 0x69, 0x45, 0xb4, 0x06, 0xb0, 0x5e, 0x27, 0x22, 
0xe2, 0xa3, + 0x30, 0x8a, 0xca, 0xbc, 0xfd, 0x97, 0x14, 0x96, 0xa0, 0xc3, 0x5e, 0xa2, 0x71, 0x29, 0xd6, 0xe8, + 0xd0, 0xa5, 0x54, 0x75, 0xbd, 0x1a, 0x09, 0x98, 0x72, 0x2b, 0x67, 0x29, 0xcd, 0x33, 0x8c, 0xf4, + 0x52, 0xe2, 0xe5, 0x58, 0x50, 0x60, 0xb4, 0x48, 0x73, 0x73, 0x3d, 0x6c, 0x97, 0x72, 0x62, 0x9e, + 0x34, 0x37, 0x13, 0x0b, 0x8a, 0xd3, 0x62, 0xe5, 0x58, 0x50, 0xa0, 0x5b, 0x66, 0x9d, 0x6e, 0x20, + 0x12, 0x4c, 0x8d, 0xe5, 0x6f, 0x99, 0x25, 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, + 0x49, 0x93, 0xdb, 0x19, 0x67, 0x34, 0x9f, 0xea, 0xc0, 0xed, 0x18, 0x74, 0xdb, 0xf3, 0x3b, 0x2f, + 0x41, 0x61, 0xbd, 0xca, 0x94, 0x62, 0x39, 0x3a, 0x83, 0xa5, 0x79, 0x83, 0x1a, 0x0b, 0xe1, 0xbe, + 0x34, 0x8f, 0x0b, 0xeb, 0x55, 0xba, 0xf4, 0x9d, 0xbb, 0xad, 0x80, 0x2c, 0xb9, 0x75, 0x22, 0xd2, + 0x4f, 0x64, 0x2e, 0xfd, 0x59, 0x89, 0x94, 0x5e, 0xfa, 0x0a, 0x84, 0x63, 0x52, 0x94, 0x6e, 0xcc, + 0x83, 0x4d, 0xe6, 0xd3, 0x55, 0xac, 0x56, 0x9a, 0x6e, 0x26, 0x17, 0xb6, 0x05, 0x23, 0xdb, 0x61, + 0x73, 0x93, 0xc8, 0x53, 0x91, 0xa9, 0xeb, 0x72, 0x22, 0x55, 0xdc, 0x10, 0x88, 0x6e, 0x10, 0xb5, + 0x9c, 0x7a, 0xea, 0x20, 0x67, 0xa2, 0x95, 0x1b, 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0x0b, 0xe1, 0x6d, + 0x1e, 0x4e, 0x8e, 0x29, 0xee, 0x72, 0x16, 0x42, 0x46, 0xc4, 0x39, 0xbe, 0x10, 0x04, 0x00, 0x4b, + 0x22, 0x6a, 0xb0, 0xd9, 0x05, 0x74, 0xac, 0xc3, 0x60, 0xa7, 0xfa, 0x1b, 0x0f, 0x36, 0xbb, 0x70, + 0x62, 0x52, 0xec, 0xa2, 0x69, 0x66, 0x24, 0xf0, 0x67, 0x6a, 0xbb, 0x9c, 0x8b, 0xa6, 0x53, 0xc2, + 0x7f, 0x7e, 0xd1, 0x64, 0x61, 0xe1, 0xcc, 0xb6, 0xe8, 0xc7, 0x35, 0x65, 0x64, 0x40, 0x91, 0x22, + 0xe3, 0x89, 0x9c, 0xc0, 0x9a, 0xe9, 0xf0, 0x81, 0xfc, 0xe3, 0x14, 0x08, 0xc7, 0xa4, 0x50, 0x0d, + 0x46, 0x9b, 0x46, 0xc4, 0x59, 0x96, 0xea, 0x23, 0x87, 0x2f, 0xc8, 0x8a, 0x4d, 0xcb, 0x25, 0x44, + 0x26, 0x04, 0x27, 0x68, 0x32, 0xcb, 0x3d, 0xee, 0xea, 0xc7, 0x32, 0x81, 0xe4, 0x4c, 0x75, 0x86, + 0x37, 0x20, 0x9f, 0x6a, 0x01, 0xc0, 0x92, 0x08, 0x1d, 0x0d, 0xe1, 0xa0, 0xe6, 0x87, 0x2c, 0xa1, + 0x4e, 0x9e, 0x82, 0x3d, 0x4b, 0x4d, 0x24, 0xc3, 0xac, 0x0b, 0x10, 0x8e, 0x49, 0xd1, 0x93, 0x9c, + 0x5e, 0x78, 0x27, 0xf3, 0x4f, 0xf2, 0xe4, 0x75, 0xc7, 0x4e, 0x72, 0x7a, 0xd9, 0x15, 0xc5, 0x55, + 0xa7, 0xa2, 0x82, 0xb3, 0x64, 0x20, 0x39, 0xfd, 0x52, 0x61, 0xc5, 0xd3, 0xfd, 0x52, 0x20, 0x1c, + 0x93, 0x62, 0x57, 0x31, 0x0b, 0x4d, 0x77, 0xba, 0xcd, 0x55, 0x4c, 0x11, 0x32, 0xae, 0x62, 0x2d, + 0x74, 0x9d, 0xfd, 0x83, 0x05, 0x38, 0xdd, 0x7e, 0xdf, 0xc6, 0x3a, 0xb4, 0x72, 0x6c, 0xb3, 0x94, + 0xd0, 0xa1, 0x71, 0x89, 0x4e, 0x8c, 0xd5, 0x75, 0xc0, 0xe1, 0x4b, 0x30, 0xa1, 0xdc, 0x11, 0xeb, + 0x6e, 0x75, 0x47, 0x4b, 0xce, 0xa9, 0x42, 0xf3, 0x54, 0x92, 0x08, 0x38, 0x5d, 0x07, 0xcd, 0xc2, + 0x98, 0x51, 0xb8, 0xbc, 0x20, 0x9e, 0xff, 0x71, 0x1a, 0x0b, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf, + 0x5b, 0x70, 0x3c, 0x27, 0x0f, 0x7b, 0xd7, 0xf1, 0x74, 0xd7, 0x61, 0xac, 0x69, 0x56, 0xed, 0x10, + 0x02, 0xdc, 0xc8, 0xf6, 0xae, 0xfa, 0x9a, 0x00, 0xe0, 0x24, 0x51, 0xfb, 0x67, 0x0b, 0x70, 0xaa, + 0xad, 0x7d, 0x3d, 0xc2, 0x70, 0x6c, 0xa3, 0x11, 0x3a, 0xf3, 0x01, 0xa9, 0x11, 0x2f, 0x72, 0x9d, + 0x7a, 0xa5, 0x49, 0xaa, 0x9a, 0x16, 0x94, 0x19, 0xaa, 0x5f, 0x5a, 0xa9, 0xcc, 0xa6, 0x31, 0x70, + 0x4e, 0x4d, 0xb4, 0x04, 0x28, 0x0d, 0x11, 0x33, 0xcc, 0x9e, 0xb8, 0x69, 0x7a, 0x38, 0xa3, 0x06, + 0x7a, 0x11, 0x46, 0x94, 0xdd, 0xbe, 0x36, 0xe3, 0xec, 0x82, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8, + 0x02, 0xcf, 0x6f, 0x24, 0x32, 0x61, 0x09, 0x95, 0xe9, 0x98, 0x4c, 0x5e, 0x24, 0x8a, 0xb1, 0x8e, + 0x33, 0x77, 0xf1, 0x77, 0xbe, 0x79, 0xfa, 0x03, 0xbf, 0xff, 0xcd, 0xd3, 0x1f, 0xf8, 0xc3, 0x6f, + 0x9e, 0xfe, 
0xc0, 0xf7, 0xec, 0x9d, 0xb6, 0x7e, 0x67, 0xef, 0xb4, 0xf5, 0xfb, 0x7b, 0xa7, 0xad, + 0x3f, 0xdc, 0x3b, 0x6d, 0xfd, 0xd7, 0xbd, 0xd3, 0xd6, 0x17, 0xff, 0xe4, 0xf4, 0x07, 0xde, 0x44, + 0x71, 0x84, 0xea, 0xf3, 0x74, 0x76, 0xce, 0x6f, 0x5f, 0xf8, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x67, 0xd5, 0x38, 0x2d, 0xc3, 0x23, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -7171,6 +7868,41 @@ func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *AppArmorProfile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppArmorProfile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppArmorProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LocalhostProfile != nil { + i -= len(*m.LocalhostProfile) + copy(dAtA[i:], *m.LocalhostProfile) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LocalhostProfile))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7457,6 +8189,18 @@ func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if m.NodeExpandSecretRef != nil { + { + size, err := m.NodeExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if m.ControllerExpandSecretRef != nil { { size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) @@ -7945,6 +8689,70 @@ func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ClusterTrustBundleProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterTrustBundleProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterTrustBundleProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x22 + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SignerName != nil { + i -= len(*m.SignerName) + copy(dAtA[i:], *m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName))) + i-- + dAtA[i] = 0x12 + } + if m.Name != nil { + i -= len(*m.Name) + copy(dAtA[i:], *m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ 
-8500,6 +9308,31 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RestartPolicy != nil { + i -= len(*m.RestartPolicy) + copy(dAtA[i:], *m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -8808,6 +9641,39 @@ func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerResizePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResizePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ContainerState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9017,6 +9883,87 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.AllocatedResourcesStatus) > 0 { + for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllocatedResourcesStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.AllocatedResources) > 0 { + keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) + for k := range m.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResources[iNdEx]) + copy(dAtA[i:], keysForAllocatedResources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } if m.Started != nil { i-- if *m.Started { @@ -9081,6 +10028,41 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9763,6 +10745,31 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if m.RestartPolicy != nil { + i -= len(*m.RestartPolicy) + copy(dAtA[i:], *m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.ResizePolicy) > 0 { + for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } if m.StartupProbe != nil { { size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) @@ -10897,6 +11904,34 @@ func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *HostIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11128,6 +12163,39 @@ func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ImageVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int 
+ _ = l + i -= len(m.PullPolicy) + copy(dAtA[i:], m.PullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.Reference) + copy(dAtA[i:], m.Reference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reference))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11233,6 +12301,18 @@ func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Sleep != nil { + { + size, err := m.Sleep.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if m.TCPSocket != nil { { size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) @@ -11572,6 +12652,42 @@ func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LinuxContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x18 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.GID)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.UID)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11653,6 +12769,13 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } + if m.IPMode != nil { + i -= len(*m.IPMode) + copy(dAtA[i:], *m.IPMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode))) + i-- + dAtA[i] = 0x1a + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -11766,6 +12889,39 @@ func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ModifyVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ModifyVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ModifyVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetVolumeAttributesClassName) + copy(dAtA[i:], m.TargetVolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetVolumeAttributesClassName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12364,6 +13520,39 @@ func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) 
- i, nil } +func (m *NodeFeatures) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeFeatures) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + i-- + if *m.SupplementalGroupsPolicy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12439,7 +13628,7 @@ func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NodeResources) Marshal() (dAtA []byte, err error) { +func (m *NodeRuntimeHandler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -12449,44 +13638,75 @@ func (m *NodeResources) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { +func (m *NodeRuntimeHandler) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NodeRuntimeHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { - v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Features != nil { + { + size, err := m.Features.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForCapacity[iNdEx]) - copy(dAtA[i:], keysForCapacity[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeRuntimeHandlerFeatures) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeRuntimeHandlerFeatures) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeRuntimeHandlerFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UserNamespaces != nil { + i-- + if *m.UserNamespaces { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.RecursiveReadOnlyMounts != nil { + i-- + if *m.RecursiveReadOnlyMounts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] 
= 0x8 } return len(dAtA) - i, nil } @@ -12722,6 +13942,32 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Features != nil { + { + size, err := m.Features.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if len(m.RuntimeHandlers) > 0 { + for iNdEx := len(m.RuntimeHandlers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RuntimeHandlers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if m.Config != nil { { size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) @@ -13285,6 +14531,13 @@ func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if m.VolumeAttributesClassName != nil { + i -= len(*m.VolumeAttributesClassName) + copy(dAtA[i:], *m.VolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) + i-- + dAtA[i] = 0x4a + } if m.DataSourceRef != nil { { size, err := m.DataSourceRef.MarshalToSizedBuffer(dAtA[:i]) @@ -13382,12 +14635,48 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l - if m.ResizeStatus != nil { - i -= len(*m.ResizeStatus) - copy(dAtA[i:], *m.ResizeStatus) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResizeStatus))) + if m.ModifyVolumeStatus != nil { + { + size, err := m.ModifyVolumeStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x32 + dAtA[i] = 0x4a + } + if m.CurrentVolumeAttributesClassName != nil { + i -= len(*m.CurrentVolumeAttributesClassName) + copy(dAtA[i:], *m.CurrentVolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CurrentVolumeAttributesClassName))) + i-- + dAtA[i] = 0x42 + } + if len(m.AllocatedResourceStatuses) > 0 { + keysForAllocatedResourceStatuses := make([]string, 0, len(m.AllocatedResourceStatuses)) + for k := range m.AllocatedResourceStatuses { + keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses) + for iNdEx := len(keysForAllocatedResourceStatuses) - 1; iNdEx >= 0; iNdEx-- { + v := m.AllocatedResourceStatuses[ResourceName(keysForAllocatedResourceStatuses[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatedResourceStatuses[iNdEx]) + copy(dAtA[i:], keysForAllocatedResourceStatuses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResourceStatuses[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } } if len(m.AllocatedResources) > 0 { keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources)) @@ -13925,6 +15214,13 @@ func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.VolumeAttributesClassName != nil { + i -= len(*m.VolumeAttributesClassName) + copy(dAtA[i:], *m.VolumeAttributesClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) + i-- + dAtA[i] = 0x52 + } if m.NodeAffinity != nil { { size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) @@ -14046,6 +15342,18 @@ 
func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.LastPhaseTransitionTime != nil { + { + size, err := m.LastPhaseTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } i -= len(m.Reason) copy(dAtA[i:], m.Reason) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -14221,6 +15529,24 @@ func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.MismatchLabelKeys) > 0 { + for iNdEx := len(m.MismatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MismatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MismatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MismatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.MatchLabelKeys) > 0 { + for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if m.NamespaceSelector != nil { { size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -14871,6 +16197,111 @@ func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResourceClaimTemplateName != nil { + i -= len(*m.ResourceClaimTemplateName) + copy(dAtA[i:], *m.ResourceClaimTemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) + i-- + dAtA[i] = 0x22 + } + if m.ResourceClaimName != nil { + i -= len(*m.ResourceClaimName) + copy(dAtA[i:], *m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResourceClaimName != nil { + i -= len(*m.ResourceClaimName) + copy(dAtA[i:], *m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *PodSchedulingGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -14891,6 +16322,25 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SupplementalGroupsPolicy != nil { + i -= len(*m.SupplementalGroupsPolicy) + copy(dAtA[i:], *m.SupplementalGroupsPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SupplementalGroupsPolicy))) + i-- + dAtA[i] = 0x62 + } + if m.AppArmorProfile != nil { + { + size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } if m.SeccompProfile != nil { { size, err := m.SeccompProfile.MarshalToSizedBuffer(dAtA[:i]) @@ -15038,6 +16488,50 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResourceClaims) > 0 { + for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + } + if len(m.SchedulingGates) > 0 { + for iNdEx := len(m.SchedulingGates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SchedulingGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb2 + } + } + if m.HostUsers != nil { + i-- + if *m.HostUsers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa8 + } if m.OS != nil { { size, err := m.OS.MarshalToSizedBuffer(dAtA[:i]) @@ -15468,6 +16962,41 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.HostIPs) > 0 { + for iNdEx := len(m.HostIPs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if len(m.ResourceClaimStatuses) > 0 { + for iNdEx := len(m.ResourceClaimStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaimStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + i -= len(m.Resize) + copy(dAtA[i:], m.Resize) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resize))) + i-- + dAtA[i] = 0x72 if len(m.EphemeralContainerStatuses) > 0 { for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { { @@ -16655,6 +18184,39 @@ func (m *ReplicationControllerStatus) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16698,6 +18260,39 @@ func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceHealth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHealth) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Health) + copy(dAtA[i:], m.Health) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Health))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceID) + copy(dAtA[i:], m.ResourceID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -16972,6 +18567,20 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Claims) > 0 { + for iNdEx := len(m.Claims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Claims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.Requests) > 0 { keysForRequests := make([]string, 0, len(m.Requests)) for k := range m.Requests { @@ -17033,6 +18642,48 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -17765,6 +19416,18 @@ func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AppArmorProfile != nil { + { + size, err := 
m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } if m.SeccompProfile != nil { { size, err := m.SeccompProfile.MarshalToSizedBuffer(dAtA[:i]) @@ -18266,6 +19929,15 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TrafficDistribution != nil { + i -= len(*m.TrafficDistribution) + copy(dAtA[i:], *m.TrafficDistribution) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TrafficDistribution))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } if m.InternalTrafficPolicy != nil { i -= len(*m.InternalTrafficPolicy) copy(dAtA[i:], *m.InternalTrafficPolicy) @@ -18521,6 +20193,32 @@ func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SleepAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SleepAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -18900,6 +20598,34 @@ func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error _ = i var l int _ = l + if len(m.MatchLabelKeys) > 0 { + for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MatchLabelKeys[iNdEx]) + copy(dAtA[i:], m.MatchLabelKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.NodeTaintsPolicy != nil { + i -= len(*m.NodeTaintsPolicy) + copy(dAtA[i:], *m.NodeTaintsPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeTaintsPolicy))) + i-- + dAtA[i] = 0x3a + } + if m.NodeAffinityPolicy != nil { + i -= len(*m.NodeAffinityPolicy) + copy(dAtA[i:], *m.NodeAffinityPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeAffinityPolicy))) + i-- + dAtA[i] = 0x32 + } + if m.MinDomains != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinDomains)) + i-- + dAtA[i] = 0x28 + } if m.LabelSelector != nil { { size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -18968,6 +20694,53 @@ func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } +func (m *TypedObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypedObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypedObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Namespace != nil { + i -= len(*m.Namespace) + copy(dAtA[i:], *m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace))) + i-- + dAtA[i] = 0x22 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + 
copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Volume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -19059,6 +20832,13 @@ func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RecursiveReadOnly != nil { + i -= len(*m.RecursiveReadOnly) + copy(dAtA[i:], *m.RecursiveReadOnly) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RecursiveReadOnly))) + i-- + dAtA[i] = 0x3a + } i -= len(m.SubPathExpr) copy(dAtA[i:], m.SubPathExpr) i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) @@ -19097,6 +20877,54 @@ func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VolumeMountStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeMountStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeMountStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RecursiveReadOnly != nil { + i -= len(*m.RecursiveReadOnly) + copy(dAtA[i:], *m.RecursiveReadOnly) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RecursiveReadOnly))) + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -19152,6 +20980,18 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ClusterTrustBundle != nil { + { + size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.ServiceAccountToken != nil { { size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) @@ -19203,6 +21043,87 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VolumeResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; 
iNdEx-- { + v := m.Requests[ResourceName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { + v := m.Limits[ResourceName(keysForLimits[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLimits[iNdEx]) + copy(dAtA[i:], keysForLimits[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *VolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -19223,6 +21144,20 @@ func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } if m.Ephemeral != nil { { size, err := m.Ephemeral.MarshalToSizedBuffer(dAtA[:i]) @@ -19782,6 +21717,21 @@ func (m *Affinity) Size() (n int) { return n } +func (m *AppArmorProfile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.LocalhostProfile != nil { + l = len(*m.LocalhostProfile) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *AttachedVolume) Size() (n int) { if m == nil { return 0 @@ -19920,6 +21870,10 @@ func (m *CSIPersistentVolumeSource) Size() (n int) { l = m.ControllerExpandSecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.NodeExpandSecretRef != nil { + l = m.NodeExpandSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -20074,6 +22028,32 @@ func (m *ClientIPConfig) Size() (n int) { return n } +func (m *ClusterTrustBundleProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SignerName != nil { + l = len(*m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + func (m *ComponentCondition) Size() (n int) { if m == nil { return 0 @@ -20353,6 +22333,16 @@ func (m *Container) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RestartPolicy != nil { + l = len(*m.RestartPolicy) + n += 2 + l + 
sovGenerated(uint64(l)) + } return n } @@ -20389,6 +22379,19 @@ func (m *ContainerPort) Size() (n int) { return n } +func (m *ContainerResizePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ContainerState) Size() (n int) { if m == nil { return 0 @@ -20478,6 +22481,48 @@ func (m *ContainerStatus) Size() (n int) { if m.Started != nil { n += 2 } + if len(m.AllocatedResources) > 0 { + for k, v := range m.AllocatedResources { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllocatedResourcesStatus) > 0 { + for _, e := range m.AllocatedResourcesStatus { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -20820,6 +22865,16 @@ func (m *EphemeralContainerCommon) Size() (n int) { l = m.StartupProbe.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ResizePolicy) > 0 { + for _, e := range m.ResizePolicy { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RestartPolicy != nil { + l = len(*m.RestartPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -21156,6 +23211,17 @@ func (m *HostAlias) Size() (n int) { return n } +func (m *HostIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *HostPathVolumeSource) Size() (n int) { if m == nil { return 0 @@ -21241,6 +23307,19 @@ func (m *ISCSIVolumeSource) Size() (n int) { return n } +func (m *ImageVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Reference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *KeyToPath) Size() (n int) { if m == nil { return 0 @@ -21292,6 +23371,10 @@ func (m *LifecycleHandler) Size() (n int) { l = m.TCPSocket.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Sleep != nil { + l = m.Sleep.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -21396,6 +23479,22 @@ func (m *LimitRangeSpec) Size() (n int) { return n } +func (m *LinuxContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.UID)) + n += 1 + sovGenerated(uint64(m.GID)) + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + func (m *List) Size() (n int) { if m == nil { return 0 @@ -21423,6 +23522,10 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if m.IPMode != nil { + l = len(*m.IPMode) + n += 1 + l + sovGenerated(uint64(l)) + } if len(m.Ports) > 0 { for _, e := range m.Ports { l = e.Size() @@ -21473,6 +23576,19 @@ 
func (m *LocalVolumeSource) Size() (n int) { return n } +func (m *ModifyVolumeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetVolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *NFSVolumeSource) Size() (n int) { if m == nil { return 0 @@ -21685,6 +23801,18 @@ func (m *NodeDaemonEndpoints) Size() (n int) { return n } +func (m *NodeFeatures) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + n += 2 + } + return n +} + func (m *NodeList) Size() (n int) { if m == nil { return 0 @@ -21713,20 +23841,32 @@ func (m *NodeProxyOptions) Size() (n int) { return n } -func (m *NodeResources) Size() (n int) { +func (m *NodeRuntimeHandler) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Features != nil { + l = m.Features.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NodeRuntimeHandlerFeatures) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RecursiveReadOnlyMounts != nil { + n += 2 + } + if m.UserNamespaces != nil { + n += 2 } return n } @@ -21882,6 +24022,16 @@ func (m *NodeStatus) Size() (n int) { l = m.Config.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.RuntimeHandlers) > 0 { + for _, e := range m.RuntimeHandlers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Features != nil { + l = m.Features.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22054,6 +24204,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = m.DataSourceRef.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeAttributesClassName != nil { + l = len(*m.VolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22095,8 +24249,20 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.ResizeStatus != nil { - l = len(*m.ResizeStatus) + if len(m.AllocatedResourceStatuses) > 0 { + for k, v := range m.AllocatedResourceStatuses { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.CurrentVolumeAttributesClassName != nil { + l = len(*m.CurrentVolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ModifyVolumeStatus != nil { + l = m.ModifyVolumeStatus.Size() n += 1 + l + sovGenerated(uint64(l)) } return n @@ -22286,6 +24452,10 @@ func (m *PersistentVolumeSpec) Size() (n int) { l = m.NodeAffinity.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeAttributesClassName != nil { + l = len(*m.VolumeAttributesClassName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22301,6 +24471,10 @@ func (m *PersistentVolumeStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Reason) n += 1 + l + sovGenerated(uint64(l)) + if m.LastPhaseTransitionTime != nil { + l = m.LastPhaseTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22375,6 +24549,18 @@ func (m *PodAffinityTerm) Size() (n int) { l = 
m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.MatchLabelKeys) > 0 { + for _, s := range m.MatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MismatchLabelKeys) > 0 { + for _, s := range m.MismatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -22601,6 +24787,51 @@ func (m *PodReadinessGate) Size() (n int) { return n } +func (m *PodResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResourceClaimName != nil { + l = len(*m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceClaimTemplateName != nil { + l = len(*m.ResourceClaimTemplateName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.ResourceClaimName != nil { + l = len(*m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSchedulingGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodSecurityContext) Size() (n int) { if m == nil { return 0 @@ -22646,6 +24877,14 @@ func (m *PodSecurityContext) Size() (n int) { l = m.SeccompProfile.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.AppArmorProfile != nil { + l = m.AppArmorProfile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SupplementalGroupsPolicy != nil { + l = len(*m.SupplementalGroupsPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -22805,6 +25044,21 @@ func (m *PodSpec) Size() (n int) { l = m.OS.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.HostUsers != nil { + n += 3 + } + if len(m.SchedulingGates) > 0 { + for _, e := range m.SchedulingGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceClaims) > 0 { + for _, e := range m.ResourceClaims { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -22862,6 +25116,20 @@ func (m *PodStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.Resize) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ResourceClaimStatuses) > 0 { + for _, e := range m.ResourceClaimStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostIPs) > 0 { + for _, e := range m.HostIPs { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23248,6 +25516,19 @@ func (m *ReplicationControllerStatus) Size() (n int) { return n } +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ResourceFieldSelector) Size() (n int) { if m == nil { return 0 @@ -23263,6 +25544,19 @@ func (m *ResourceFieldSelector) Size() (n int) { return n } +func (m *ResourceHealth) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Health) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ResourceQuota) Size() (n int) { if m == nil { return 0 @@ -23374,6 +25668,29 @@ func (m *ResourceRequirements) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if len(m.Claims) > 0 { + for _, e := 
range m.Claims { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -23684,6 +26001,10 @@ func (m *SecurityContext) Size() (n int) { l = m.SeccompProfile.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.AppArmorProfile != nil { + l = m.AppArmorProfile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -23898,6 +26219,10 @@ func (m *ServiceSpec) Size() (n int) { l = len(*m.InternalTrafficPolicy) n += 2 + l + sovGenerated(uint64(l)) } + if m.TrafficDistribution != nil { + l = len(*m.TrafficDistribution) + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -23931,6 +26256,16 @@ func (m *SessionAffinityConfig) Size() (n int) { return n } +func (m *SleepAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + return n +} + func (m *StorageOSPersistentVolumeSource) Size() (n int) { if m == nil { return 0 @@ -24083,6 +26418,23 @@ func (m *TopologySpreadConstraint) Size() (n int) { l = m.LabelSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.MinDomains != nil { + n += 1 + sovGenerated(uint64(*m.MinDomains)) + } + if m.NodeAffinityPolicy != nil { + l = len(*m.NodeAffinityPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeTaintsPolicy != nil { + l = len(*m.NodeTaintsPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchLabelKeys) > 0 { + for _, s := range m.MatchLabelKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -24103,6 +26455,27 @@ func (m *TypedLocalObjectReference) Size() (n int) { return n } +func (m *TypedObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Namespace != nil { + l = len(*m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Volume) Size() (n int) { if m == nil { return 0 @@ -24148,6 +26521,28 @@ func (m *VolumeMount) Size() (n int) { } l = len(m.SubPathExpr) n += 1 + l + sovGenerated(uint64(l)) + if m.RecursiveReadOnly != nil { + l = len(*m.RecursiveReadOnly) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeMountStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.RecursiveReadOnly != nil { + l = len(*m.RecursiveReadOnly) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24186,6 +26581,37 @@ func (m *VolumeProjection) Size() (n int) { l = m.ServiceAccountToken.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ClusterTrustBundle != nil { + l = m.ClusterTrustBundle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -24311,6 +26737,10 @@ func (m *VolumeSource) Size() (n int) { l = m.Ephemeral.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.Image != nil { + l = m.Image.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -24398,6 +26828,17 @@ func (this *Affinity) String() string { }, "") return s } +func (this *AppArmorProfile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppArmorProfile{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `LocalhostProfile:` + valueToStringGenerated(this.LocalhostProfile) + `,`, + `}`, + }, "") + return s +} func (this *AttachedVolume) String() string { if this == nil { return "nil" @@ -24499,6 +26940,7 @@ func (this *CSIPersistentVolumeSource) String() string { `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, `ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodeExpandSecretRef:` + strings.Replace(this.NodeExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, `}`, }, "") return s @@ -24604,6 +27046,20 @@ func (this *ClientIPConfig) String() string { }, "") return s } +func (this *ClusterTrustBundleProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterTrustBundleProjection{`, + `Name:` + valueToStringGenerated(this.Name) + `,`, + `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} func (this *ComponentCondition) String() string { if this == nil { return "nil" @@ -24799,6 +27255,11 @@ func (this *Container) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&Container{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -24822,6 +27283,8 @@ func (this *Container) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, + `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, `}`, }, "") return s @@ -24851,6 +27314,17 @@ func (this *ContainerPort) String() string { }, "") return s } +func (this *ContainerResizePolicy) String() 
string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResizePolicy{`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `}`, + }, "") + return s +} func (this *ContainerState) String() string { if this == nil { return "nil" @@ -24904,6 +27378,26 @@ func (this *ContainerStatus) String() string { if this == nil { return "nil" } + repeatedStringForVolumeMounts := "[]VolumeMountStatus{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMountStatus", "VolumeMountStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForAllocatedResourcesStatus := "[]ResourceStatus{" + for _, f := range this.AllocatedResourcesStatus { + repeatedStringForAllocatedResourcesStatus += strings.Replace(strings.Replace(f.String(), "ResourceStatus", "ResourceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForAllocatedResourcesStatus += "}" + keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) + for k := range this.AllocatedResources { + keysForAllocatedResources = append(keysForAllocatedResources, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources) + mapStringForAllocatedResources := "ResourceList{" + for _, k := range keysForAllocatedResources { + mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) + } + mapStringForAllocatedResources += "}" s := strings.Join([]string{`&ContainerStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, @@ -24914,6 +27408,21 @@ func (this *ContainerStatus) String() string { `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, `Started:` + valueToStringGenerated(this.Started) + `,`, + `AllocatedResources:` + mapStringForAllocatedResources + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`, + `AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`, + `}`, + }, "") + return s +} +func (this *ContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerUser{`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerUser", "LinuxContainerUser", 1) + `,`, `}`, }, "") return s @@ -25145,6 +27654,11 @@ func (this *EphemeralContainerCommon) String() string { repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," } repeatedStringForVolumeDevices += "}" + repeatedStringForResizePolicy := "[]ContainerResizePolicy{" + for _, f := range this.ResizePolicy { + repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForResizePolicy += "}" s := strings.Join([]string{`&EphemeralContainerCommon{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, @@ -25168,6 +27682,8 @@ func (this *EphemeralContainerCommon) String() string { `TerminationMessagePolicy:` + fmt.Sprintf("%v", 
this.TerminationMessagePolicy) + `,`, `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `ResizePolicy:` + repeatedStringForResizePolicy + `,`, + `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`, `}`, }, "") return s @@ -25429,6 +27945,16 @@ func (this *HostAlias) String() string { }, "") return s } +func (this *HostIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostIP{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} func (this *HostPathVolumeSource) String() string { if this == nil { return "nil" @@ -25480,6 +28006,17 @@ func (this *ISCSIVolumeSource) String() string { }, "") return s } +func (this *ImageVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageVolumeSource{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `}`, + }, "") + return s +} func (this *KeyToPath) String() string { if this == nil { return "nil" @@ -25511,6 +28048,7 @@ func (this *LifecycleHandler) String() string { `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `Sleep:` + strings.Replace(this.Sleep.String(), "SleepAction", "SleepAction", 1) + `,`, `}`, }, "") return s @@ -25622,6 +28160,18 @@ func (this *LimitRangeSpec) String() string { }, "") return s } +func (this *LinuxContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxContainerUser{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `}`, + }, "") + return s +} func (this *List) String() string { if this == nil { return "nil" @@ -25650,6 +28200,7 @@ func (this *LoadBalancerIngress) String() string { s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `IPMode:` + valueToStringGenerated(this.IPMode) + `,`, `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") @@ -25691,6 +28242,17 @@ func (this *LocalVolumeSource) String() string { }, "") return s } +func (this *ModifyVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ModifyVolumeStatus{`, + `TargetVolumeAttributesClassName:` + fmt.Sprintf("%v", this.TargetVolumeAttributesClassName) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} func (this *NFSVolumeSource) String() string { if this == nil { return "nil" @@ -25858,6 +28420,16 @@ func (this *NodeDaemonEndpoints) String() string { }, "") return s } +func (this *NodeFeatures) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeFeatures{`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, + `}`, + }, "") + return s +} func (this *NodeList) String() string { if this == nil { return "nil" @@ -25884,22 +28456,24 @@ func (this *NodeProxyOptions) String() string { }, "") return s } -func (this *NodeResources) String() string { +func (this *NodeRuntimeHandler) String() string { if this == nil 
{ return "nil" } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + s := strings.Join([]string{`&NodeRuntimeHandler{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Features:` + strings.Replace(this.Features.String(), "NodeRuntimeHandlerFeatures", "NodeRuntimeHandlerFeatures", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeRuntimeHandlerFeatures) String() string { + if this == nil { + return "nil" } - mapStringForCapacity += "}" - s := strings.Join([]string{`&NodeResources{`, - `Capacity:` + mapStringForCapacity + `,`, + s := strings.Join([]string{`&NodeRuntimeHandlerFeatures{`, + `RecursiveReadOnlyMounts:` + valueToStringGenerated(this.RecursiveReadOnlyMounts) + `,`, + `UserNamespaces:` + valueToStringGenerated(this.UserNamespaces) + `,`, `}`, }, "") return s @@ -25997,6 +28571,11 @@ func (this *NodeStatus) String() string { repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + "," } repeatedStringForVolumesAttached += "}" + repeatedStringForRuntimeHandlers := "[]NodeRuntimeHandler{" + for _, f := range this.RuntimeHandlers { + repeatedStringForRuntimeHandlers += strings.Replace(strings.Replace(f.String(), "NodeRuntimeHandler", "NodeRuntimeHandler", 1), `&`, ``, 1) + "," + } + repeatedStringForRuntimeHandlers += "}" keysForCapacity := make([]string, 0, len(this.Capacity)) for k := range this.Capacity { keysForCapacity = append(keysForCapacity, string(k)) @@ -26029,6 +28608,8 @@ func (this *NodeStatus) String() string { `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + `RuntimeHandlers:` + repeatedStringForRuntimeHandlers + `,`, + `Features:` + strings.Replace(this.Features.String(), "NodeFeatures", "NodeFeatures", 1) + `,`, `}`, }, "") return s @@ -26140,13 +28721,14 @@ func (this *PersistentVolumeClaimSpec) String() string { } s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "VolumeResourceRequirements", "VolumeResourceRequirements", 1), `&`, ``, 1) + `,`, `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, - `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedObjectReference", "TypedObjectReference", 1) + `,`, + `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) 
+ `,`, `}`, }, "") return s @@ -26180,13 +28762,25 @@ func (this *PersistentVolumeClaimStatus) String() string { mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)]) } mapStringForAllocatedResources += "}" + keysForAllocatedResourceStatuses := make([]string, 0, len(this.AllocatedResourceStatuses)) + for k := range this.AllocatedResourceStatuses { + keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses) + mapStringForAllocatedResourceStatuses := "map[ResourceName]ClaimResourceStatus{" + for _, k := range keysForAllocatedResourceStatuses { + mapStringForAllocatedResourceStatuses += fmt.Sprintf("%v: %v,", k, this.AllocatedResourceStatuses[ResourceName(k)]) + } + mapStringForAllocatedResourceStatuses += "}" s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, `Capacity:` + mapStringForCapacity + `,`, `Conditions:` + repeatedStringForConditions + `,`, `AllocatedResources:` + mapStringForAllocatedResources + `,`, - `ResizeStatus:` + valueToStringGenerated(this.ResizeStatus) + `,`, + `AllocatedResourceStatuses:` + mapStringForAllocatedResourceStatuses + `,`, + `CurrentVolumeAttributesClassName:` + valueToStringGenerated(this.CurrentVolumeAttributesClassName) + `,`, + `ModifyVolumeStatus:` + strings.Replace(this.ModifyVolumeStatus.String(), "ModifyVolumeStatus", "ModifyVolumeStatus", 1) + `,`, `}`, }, "") return s @@ -26284,6 +28878,7 @@ func (this *PersistentVolumeSpec) String() string { `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`, `}`, }, "") return s @@ -26296,6 +28891,7 @@ func (this *PersistentVolumeStatus) String() string { `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `LastPhaseTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastPhaseTransitionTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -26353,6 +28949,8 @@ func (this *PodAffinityTerm) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`, + `MismatchLabelKeys:` + fmt.Sprintf("%v", this.MismatchLabelKeys) + `,`, `}`, }, "") return s @@ -26534,6 +29132,39 @@ func (this *PodReadinessGate) String() string { }, "") return s } +func (this *PodResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodResourceClaim{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, + `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, + `}`, + }, "") + return s +} +func (this *PodResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodResourceClaimStatus{`, + `Name:` + fmt.Sprintf("%v", 
this.Name) + `,`, + `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingGate{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *PodSecurityContext) String() string { if this == nil { return "nil" @@ -26554,6 +29185,8 @@ func (this *PodSecurityContext) String() string { `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, `FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`, `SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`, + `AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, `}`, }, "") return s @@ -26617,6 +29250,16 @@ func (this *PodSpec) String() string { repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," } repeatedStringForEphemeralContainers += "}" + repeatedStringForSchedulingGates := "[]PodSchedulingGate{" + for _, f := range this.SchedulingGates { + repeatedStringForSchedulingGates += strings.Replace(strings.Replace(f.String(), "PodSchedulingGate", "PodSchedulingGate", 1), `&`, ``, 1) + "," + } + repeatedStringForSchedulingGates += "}" + repeatedStringForResourceClaims := "[]PodResourceClaim{" + for _, f := range this.ResourceClaims { + repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "PodResourceClaim", "PodResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceClaims += "}" keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) for k := range this.NodeSelector { keysForNodeSelector = append(keysForNodeSelector, k) @@ -26674,6 +29317,9 @@ func (this *PodSpec) String() string { `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, `SetHostnameAsFQDN:` + valueToStringGenerated(this.SetHostnameAsFQDN) + `,`, `OS:` + strings.Replace(this.OS.String(), "PodOS", "PodOS", 1) + `,`, + `HostUsers:` + valueToStringGenerated(this.HostUsers) + `,`, + `SchedulingGates:` + repeatedStringForSchedulingGates + `,`, + `ResourceClaims:` + repeatedStringForResourceClaims + `,`, `}`, }, "") return s @@ -26707,6 +29353,16 @@ func (this *PodStatus) String() string { repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," } repeatedStringForEphemeralContainerStatuses += "}" + repeatedStringForResourceClaimStatuses := "[]PodResourceClaimStatus{" + for _, f := range this.ResourceClaimStatuses { + repeatedStringForResourceClaimStatuses += strings.Replace(strings.Replace(f.String(), "PodResourceClaimStatus", "PodResourceClaimStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceClaimStatuses += "}" + repeatedStringForHostIPs := "[]HostIP{" + for _, f := range this.HostIPs { + repeatedStringForHostIPs += strings.Replace(strings.Replace(f.String(), "HostIP", "HostIP", 1), `&`, ``, 1) + "," + } + repeatedStringForHostIPs += "}" s := strings.Join([]string{`&PodStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `Conditions:` + repeatedStringForConditions + `,`, @@ -26721,6 +29377,9 @@ 
func (this *PodStatus) String() string { `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, `PodIPs:` + repeatedStringForPodIPs + `,`, `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `Resize:` + fmt.Sprintf("%v", this.Resize) + `,`, + `ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`, + `HostIPs:` + repeatedStringForHostIPs + `,`, `}`, }, "") return s @@ -27023,6 +29682,17 @@ func (this *ReplicationControllerStatus) String() string { }, "") return s } +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `}`, + }, "") + return s +} func (this *ResourceFieldSelector) String() string { if this == nil { return "nil" @@ -27035,6 +29705,17 @@ func (this *ResourceFieldSelector) String() string { }, "") return s } +func (this *ResourceHealth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHealth{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `Health:` + fmt.Sprintf("%v", this.Health) + `,`, + `}`, + }, "") + return s +} func (this *ResourceQuota) String() string { if this == nil { return "nil" @@ -27120,6 +29801,11 @@ func (this *ResourceRequirements) String() string { if this == nil { return "nil" } + repeatedStringForClaims := "[]ResourceClaim{" + for _, f := range this.Claims { + repeatedStringForClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForClaims += "}" keysForLimits := make([]string, 0, len(this.Limits)) for k := range this.Limits { keysForLimits = append(keysForLimits, string(k)) @@ -27143,6 +29829,23 @@ func (this *ResourceRequirements) String() string { s := strings.Join([]string{`&ResourceRequirements{`, `Limits:` + mapStringForLimits + `,`, `Requests:` + mapStringForRequests + `,`, + `Claims:` + repeatedStringForClaims + `,`, + `}`, + }, "") + return s +} +func (this *ResourceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]ResourceHealth{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceHealth", "ResourceHealth", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&ResourceStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Resources:` + repeatedStringForResources + `,`, `}`, }, "") return s @@ -27371,6 +30074,7 @@ func (this *SecurityContext) String() string { `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, `SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`, + `AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`, `}`, }, "") return s @@ -27528,6 +30232,7 @@ func (this *ServiceSpec) String() string { `AllocateLoadBalancerNodePorts:` + valueToStringGenerated(this.AllocateLoadBalancerNodePorts) + `,`, `LoadBalancerClass:` + valueToStringGenerated(this.LoadBalancerClass) + `,`, `InternalTrafficPolicy:` + valueToStringGenerated(this.InternalTrafficPolicy) + `,`, + `TrafficDistribution:` + valueToStringGenerated(this.TrafficDistribution) + 
`,`, `}`, }, "") return s @@ -27558,6 +30263,16 @@ func (this *SessionAffinityConfig) String() string { }, "") return s } +func (this *SleepAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SleepAction{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `}`, + }, "") + return s +} func (this *StorageOSPersistentVolumeSource) String() string { if this == nil { return "nil" @@ -27670,6 +30385,10 @@ func (this *TopologySpreadConstraint) String() string { `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MinDomains:` + valueToStringGenerated(this.MinDomains) + `,`, + `NodeAffinityPolicy:` + valueToStringGenerated(this.NodeAffinityPolicy) + `,`, + `NodeTaintsPolicy:` + valueToStringGenerated(this.NodeTaintsPolicy) + `,`, + `MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`, `}`, }, "") return s @@ -27686,6 +30405,19 @@ func (this *TypedLocalObjectReference) String() string { }, "") return s } +func (this *TypedObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypedObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + valueToStringGenerated(this.Namespace) + `,`, + `}`, + }, "") + return s +} func (this *Volume) String() string { if this == nil { return "nil" @@ -27719,6 +30451,20 @@ func (this *VolumeMount) String() string { `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, + `RecursiveReadOnly:` + valueToStringGenerated(this.RecursiveReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMountStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMountStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `RecursiveReadOnly:` + valueToStringGenerated(this.RecursiveReadOnly) + `,`, `}`, }, "") return s @@ -27742,6 +30488,38 @@ func (this *VolumeProjection) String() string { `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := 
make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&VolumeResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, `}`, }, "") return s @@ -27780,6 +30558,7 @@ func (this *VolumeSource) String() string { `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, `Ephemeral:` + strings.Replace(this.Ephemeral.String(), "EphemeralVolumeSource", "EphemeralVolumeSource", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ImageVolumeSource", "ImageVolumeSource", 1) + `,`, `}`, }, "") return s @@ -28140,6 +30919,121 @@ func (m *Affinity) Unmarshal(dAtA []byte) error { } return nil } +func (m *AppArmorProfile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppArmorProfile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppArmorProfile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = AppArmorProfileType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalhostProfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.LocalhostProfile = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AttachedVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -29405,6 +32299,42 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeExpandSecretRef == nil { + m.NodeExpandSecretRef = &SecretReference{} + } + if err := m.NodeExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30717,7 +33647,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30740,15 +33670,15 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterTrustBundleProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterTrustBundleProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30776,11 +33706,12 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30808,13 +33739,14 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.SignerName = &s iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30824,27 +33756,230 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 
0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33079,6 +36214,73 @@ func (m *Container) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) + m.RestartPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33385,6 +36587,120 @@ func (m *ContainerPort) Unmarshal(dAtA []byte) error { } return nil } +func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ContainerState) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -34273,61 +37589,11 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Started = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType) } - m.Port = 0 
+ var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34337,64 +37603,124 @@ func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthGenerated } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.AllocatedResources == nil { + m.AllocatedResources = make(ResourceList) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: + m.AllocatedResources[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34421,66 +37747,18 @@ func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Resources == nil { + m.Resources = &ResourceRequirements{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34490,27 +37768,29 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.VolumeMounts = append(m.VolumeMounts, VolumeMountStatus{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34537,16 +37817,16 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} + if m.User == nil { + m.User = &ContainerUser{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { return err } iNdEx = postIndex - case 3: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34573,33 +37853,11 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{}) + if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34621,7 +37879,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ContainerUser) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34644,15 +37902,15 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerUser: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34679,31 +37937,13 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Linux == nil { + m.Linux = &LinuxContainerUser{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34725,7 +37965,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34748,17 +37988,17 @@ func 
(m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var stringLen uint64 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34768,27 +38008,64 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34815,10 +38092,8 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SizeLimit == nil { - m.SizeLimit = &resource.Quantity{} - } - if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34843,7 +38118,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointAddress) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34866,15 +38141,15 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + return 
fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34902,11 +38177,11 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34933,18 +38208,18 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34954,29 +38229,33 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34986,25 +38265,12 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s - iNdEx = postIndex + m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -35026,7 +38292,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointPort) 
Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35049,17 +38315,17 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35069,29 +38335,31 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) } - m.Port = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35101,14 +38369,65 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 3: + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35136,13 +38455,13 @@ func (m 
*EndpointPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35152,24 +38471,27 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.AppProtocol = &s + if m.SizeLimit == nil { + m.SizeLimit = &resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -35192,7 +38514,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35215,17 +38537,17 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35235,29 +38557,27 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35284,16 +38604,18 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := 
m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35303,25 +38625,56 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -35344,7 +38697,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } return nil } -func (m *Endpoints) Unmarshal(dAtA []byte) error { +func (m *EndpointPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35367,17 +38720,17 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35387,30 +38740,29 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var msglen int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35420,81 +38772,16 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35504,30 +38791,29 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35537,25 +38823,24 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen 
< 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s iNdEx = postIndex default: iNdEx = preIndex @@ -35578,7 +38863,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35601,17 +38886,17 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35621,27 +38906,29 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Prefix = string(dAtA[iNdEx:postIndex]) + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35668,16 +38955,14 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} - } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35704,10 +38989,8 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35732,7 +39015,395 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVar) Unmarshal(dAtA []byte) error { +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36919,6 +40590,73 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{}) + if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ContainerRestartPolicy(dAtA[iNdEx:postIndex]) + m.RestartPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -40009,7 +43747,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { +func (m *HostIP) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40032,47 +43770,15 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
HostPathVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: HostIP: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40100,8 +43806,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := HostPathType(dAtA[iNdEx:postIndex]) - m.Type = &s + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -40124,7 +43829,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40147,182 +43852,15 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetPortal = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IQN = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - m.Lun = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lun |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40350,87 +43888,11 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DiscoveryCHAPAuth = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SessionCHAPAuth = bool(v != 0) - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40458,8 +43920,8 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.InitiatorName = &s + s := HostPathType(dAtA[iNdEx:postIndex]) + m.Type = &s iNdEx = postIndex default: iNdEx = preIndex @@ -40482,7 +43944,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40505,10 +43967,10 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -40760,7 +44222,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} + m.SecretRef = &SecretReference{} } if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -40840,7 +44302,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *KeyToPath) Unmarshal(dAtA []byte) error { +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40863,15 +44325,15 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = 
%d for field TargetPortal", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40899,11 +44361,11 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.TargetPortal = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40931,13 +44393,13 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.IQN = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) } - var v int32 + m.Lun = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40947,67 +44409,100 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.Lun |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Mode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lifecycle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } - if iNdEx >= l { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41017,31 +44512,470 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PostStart == nil { - m.PostStart = &LifecycleHandler{} + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) } - if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyToPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyToPath: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PostStart == nil { + m.PostStart = 
&LifecycleHandler{} + } + if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -41233,6 +45167,42 @@ func (m *LifecycleHandler) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sleep", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sleep == nil { + m.Sleep = &SleepAction{} + } + if err := m.Sleep.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42298,6 +46268,170 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *LinuxContainerUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainerUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + m.UID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType) + } + m.GID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *List) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -42508,6 +46642,39 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := LoadBalancerIPMode(dAtA[iNdEx:postIndex]) + m.IPMode = &s + iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) @@ -42844,7 +47011,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -42867,15 +47034,15 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ModifyVolumeStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ModifyVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetVolumeAttributesClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -42903,11 +47070,11 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(dAtA[iNdEx:postIndex]) + m.TargetVolumeAttributesClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) 
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -42935,28 +47102,8 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Status = PersistentVolumeClaimModifyVolumeStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42978,7 +47125,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Namespace) Unmarshal(dAtA []byte) error { +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -43001,17 +47148,17 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43021,30 +47168,29 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Server = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43054,30 +47200,29 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -43087,25 +47232,161 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44639,7 +48920,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeList) Unmarshal(dAtA []byte) error { +func (m *NodeFeatures) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44662,50 +48943,17 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeList: wiretype end group for non-group") + return fmt.Errorf("proto: NodeFeatures: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeFeatures: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44715,26 +48963,13 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Node{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + b := bool(v != 0) + m.SupplementalGroupsPolicy = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44756,7 +48991,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { +func (m *NodeList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44779,17 +49014,17 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: NodeList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeList: illegal tag %d (wire type 
%d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44799,23 +49034,58 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Node{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -44838,7 +49108,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *NodeResources) Unmarshal(dAtA []byte) error { +func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44861,15 +49131,129 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") + return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeRuntimeHandler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeRuntimeHandler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeRuntimeHandler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -44896,106 +49280,105 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Capacity == nil { - m.Capacity = make(ResourceList) + if m.Features == nil { + m.Features = &NodeRuntimeHandlerFeatures{} } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeRuntimeHandlerFeatures) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeRuntimeHandlerFeatures: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeRuntimeHandlerFeatures: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnlyMounts", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Capacity[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex + b := bool(v != 0) + m.RecursiveReadOnlyMounts = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaces", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.UserNamespaces = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -46222,6 +50605,76 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandlers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandlers = append(m.RuntimeHandlers, NodeRuntimeHandler{}) + if err := m.RuntimeHandlers[len(m.RuntimeHandlers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Features == nil { + m.Features = &NodeFeatures{} + } + if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47954,12 +52407,45 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.DataSourceRef == nil { - m.DataSourceRef = &TypedLocalObjectReference{} + m.DataSourceRef = &TypedObjectReference{} } if err := m.DataSourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeAttributesClassName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -48366,11 +52852,11 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } m.AllocatedResources[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex - case 6: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResizeStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourceStatuses", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48380,80 +52866,124 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := PersistentVolumeClaimResizeStatus(dAtA[iNdEx:postIndex]) - m.ResizeStatus = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaimTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF + if m.AllocatedResourceStatuses == nil { + m.AllocatedResourceStatuses = make(map[ResourceName]ClaimResourceStatus) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey ResourceName + var mapvalue ClaimResourceStatus + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = ClaimResourceStatus(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.AllocatedResourceStatuses[ResourceName(mapkey)] = ((ClaimResourceStatus)(mapvalue)) + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentVolumeAttributesClassName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -48463,28 +52993,147 @@ func (m *PersistentVolumeClaimTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + 
intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.CurrentVolumeAttributesClassName = &s iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ModifyVolumeStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ModifyVolumeStatus == nil { + m.ModifyVolumeStatus = &ModifyVolumeStatus{} + } + if err := m.ModifyVolumeStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -50021,6 +54670,39 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VolumeAttributesClassName = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50167,6 +54849,42 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastPhaseTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastPhaseTransitionTime == nil { + m.LastPhaseTransitionTime = &v1.Time{} + } + if err := m.LastPhaseTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50734,61 +55452,11 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -50798,31 +55466,29 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDuringSchedulingIgnoredDuringExecution = 
append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MismatchLabelKeys", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -50832,25 +55498,141 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.MismatchLabelKeys = append(m.MismatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := 
m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -52565,7 +57347,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { +func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -52588,17 +57370,17 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + return fmt.Errorf("proto: PodResourceClaim: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -52608,224 +57390,29 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &SELinuxOptions{} - } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAsUser = &v case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) - } - var v int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.RunAsNonRoot = &b - case 4: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SupplementalGroups = append(m.SupplementalGroups, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.SupplementalGroups) == 0 { - m.SupplementalGroups = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SupplementalGroups = append(m.SupplementalGroups, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FSGroup = &v - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAsGroup = &v - case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sysctls = append(m.Sysctls, Sysctl{}) - if err := m.Sysctls[len(m.Sysctls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) - } - var msglen int + var stringLen uint64 for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -52835,31 +57422,571 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.WindowsOptions == nil { - m.WindowsOptions = &WindowsSecurityContextOptions{} - } - if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimName = &s iNdEx = postIndex - case 9: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroupChangePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimTemplateName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingGate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingGate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingGate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 4: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FSGroup = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsGroup = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sysctls = append(m.Sysctls, Sysctl{}) + if err := m.Sysctls[len(m.Sysctls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroupChangePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -52926,6 +58053,75 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppArmorProfile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppArmorProfile == nil { + m.AppArmorProfile = &AppArmorProfile{} + } + if err := m.AppArmorProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex]) + m.SupplementalGroupsPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ 
-54326,6 +59522,95 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 37: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostUsers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.HostUsers = &b + case 38: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulingGates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulingGates = append(m.SchedulingGates, PodSchedulingGate{}) + if err := m.SchedulingGates[len(m.SchedulingGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 39: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceClaims = append(m.ResourceClaims, PodResourceClaim{}) + if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -54635,46 +59920,146 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) - if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) + if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) + if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NominatedNodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIPs = append(m.PodIPs, PodIP{}) + if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -54701,14 +60086,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) - if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) + if err := 
m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NominatedNodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -54736,11 +60121,11 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) + m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimStatuses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -54767,14 +60152,14 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PodIPs = append(m.PodIPs, PodIP{}) - if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResourceClaimStatuses = append(m.ResourceClaimStatuses, PodResourceClaimStatus{}) + if err := m.ResourceClaimStatuses[len(m.ResourceClaimStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostIPs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -54801,8 +60186,8 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) - if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.HostIPs = append(m.HostIPs, HostIP{}) + if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -58304,6 +63689,120 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -58451,6 +63950,120 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceHealth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceHealth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceHealth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceID = ResourceID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Health = ResourceHealthStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ResourceQuota) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -59559,6 +65172,156 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Claims = append(m.Claims, ResourceClaim{}) + if err := m.Claims[len(m.Claims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceHealth{}) + if err := 
m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -62290,6 +68053,42 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppArmorProfile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppArmorProfile == nil { + m.AppArmorProfile = &AppArmorProfile{} + } + if err := m.AppArmorProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -63827,7 +69626,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 0 { @@ -63934,7 +69733,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := IPFamilyPolicyType(dAtA[iNdEx:postIndex]) + s := IPFamilyPolicy(dAtA[iNdEx:postIndex]) m.IPFamilyPolicy = &s iNdEx = postIndex case 18: @@ -64085,9 +69884,42 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := ServiceInternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrafficDistribution", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TrafficDistribution = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -64312,6 +70144,75 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *SleepAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepAction: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -65523,7 +71424,294 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } return nil } -func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { +func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) + } + m.MaxSkew = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxSkew |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDomains", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinDomains = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinityPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := NodeInclusionPolicy(dAtA[iNdEx:postIndex]) + m.NodeAffinityPolicy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeTaintsPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := NodeInclusionPolicy(dAtA[iNdEx:postIndex]) + m.NodeTaintsPolicy = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -65546,34 +71734,15 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) - } - m.MaxSkew = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxSkew |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -65601,11 +71770,12 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TopologyKey = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -65633,13 +71803,13 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -65649,27 +71819,23 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.LabelSelector == nil { - m.LabelSelector = &v1.LabelSelector{} - } - if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -65692,7 +71858,7 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { } return nil } -func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { 
+func (m *TypedObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -65715,10 +71881,10 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group") + return fmt.Errorf("proto: TypedObjectReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TypedObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -65818,6 +71984,39 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66278,6 +72477,206 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } m.SubPathExpr = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnly", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := RecursiveReadOnlyMode(dAtA[iNdEx:postIndex]) + m.RecursiveReadOnly = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMountStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeMountStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMountStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnly", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := RecursiveReadOnlyMode(dAtA[iNdEx:postIndex]) + m.RecursiveReadOnly = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66479,52 +72878,303 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DownwardAPI == nil { - m.DownwardAPI = &DownwardAPIProjection{} - } - if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIProjection{} + } + if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapProjection{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountToken == nil { + m.ServiceAccountToken = &ServiceAccountTokenProjection{} + } + if err := m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterTrustBundle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterTrustBundle == nil { + m.ClusterTrustBundle = &ClusterTrustBundleProjection{} + } + if err := m.ClusterTrustBundle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated 
+ } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = make(ResourceList) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMap == nil { - m.ConfigMap = &ConfigMapProjection{} - } - if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Limits[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -66551,12 +73201,105 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ServiceAccountToken == nil { - m.ServiceAccountToken = &ServiceAccountTokenProjection{} + if m.Requests == nil { + m.Requests = make(ResourceList) } - if err := m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -67652,6 +74395,42 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageVolumeSource{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 0b4584e7e..68ac80ed0 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/core/v1"; // Represents a Persistent Disk resource in AWS. 
// @@ -37,11 +37,11 @@ option go_package = "v1"; // can only be mounted as read/write once. AWS EBS volumes support // ownership management and SELinux relabeling. message AWSElasticBlockStoreVolumeSource { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore optional string volumeID = 1; - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore @@ -49,15 +49,14 @@ message AWSElasticBlockStoreVolumeSource { // +optional optional string fsType = 2; - // The partition in the volume that you want to mount. + // partition is the partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). // +optional optional int32 partition = 3; - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". + // readOnly value true will force the readOnly setting in VolumeMounts. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional bool readOnly = 4; @@ -78,6 +77,25 @@ message Affinity { optional PodAntiAffinity podAntiAffinity = 3; } +// AppArmorProfile defines a pod or container's AppArmor settings. +// +union +message AppArmorProfile { + // type indicates which kind of AppArmor profile will be applied. + // Valid options are: + // Localhost - a profile pre-loaded on the node. + // RuntimeDefault - the container runtime's default profile. + // Unconfined - no AppArmor enforcement. + // +unionDiscriminator + optional string type = 1; + + // localhostProfile indicates a profile loaded on the node that should be used. + // The profile must be preconfigured on the node to work. + // Must match the loaded name of the profile. + // Must be set if and only if type is "Localhost". + // +optional + optional string localhostProfile = 2; +} + // AttachedVolume describes a volume attached to a node message AttachedVolume { // Name of the attached volume @@ -94,50 +112,55 @@ message AvoidPods { // Bounded-sized list of signatures of pods that should avoid this node, sorted // in timestamp order from oldest to newest. Size of the slice is unspecified. // +optional + // +listType=atomic repeated PreferAvoidPodsEntry preferAvoidPods = 1; } // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. message AzureDiskVolumeSource { - // The Name of the data disk in the blob storage + // diskName is the Name of the data disk in the blob storage optional string diskName = 1; - // The URI the data disk in the blob storage + // diskURI is the URI of data disk in the blob storage optional string diskURI = 2; - // Host Caching mode: None, Read Only, Read Write. + // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) optional string cachingMode = 3; - // Filesystem type to mount. 
+ // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" optional string fsType = 4; - // Defaults to false (read/write). ReadOnly here will force + // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false optional bool readOnly = 5; - // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) optional string kind = 6; } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. message AzureFilePersistentVolumeSource { - // the name of secret that contains Azure Storage Account Name and Key + // secretName is the name of secret that contains Azure Storage Account Name and Key optional string secretName = 1; - // Share Name + // shareName is the azure Share Name optional string shareName = 2; - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 3; - // the namespace of the secret that contains Azure Storage Account Name and Key + // secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key // default is the same as the Pod // +optional optional string secretNamespace = 4; @@ -145,13 +168,13 @@ message AzureFilePersistentVolumeSource { // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. message AzureFileVolumeSource { - // the name of secret that contains Azure Storage Account Name and Key + // secretName is the name of secret that contains Azure Storage Account Name and Key optional string secretName = 1; - // Share Name + // shareName is the azure share Name optional string shareName = 2; - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 3; @@ -163,7 +186,7 @@ message Binding { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The target object that you want to bind to the standard object. optional ObjectReference target = 2; @@ -171,31 +194,30 @@ message Binding { // Represents storage that is managed by an external CSI volume driver (Beta feature) message CSIPersistentVolumeSource { - // Driver is the name of the driver to use for this volume. + // driver is the name of the driver to use for this volume. // Required. optional string driver = 1; - // VolumeHandle is the unique volume name returned by the CSI volume + // volumeHandle is the unique volume name returned by the CSI volume // plugin’s CreateVolume to refer to the volume on all subsequent calls. // Required. 
optional string volumeHandle = 2; - // Optional: The value to pass to ControllerPublishVolumeRequest. + // readOnly value to pass to ControllerPublishVolumeRequest. // Defaults to false (read/write). // +optional optional bool readOnly = 3; - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. + // fsType to mount. Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". // +optional optional string fsType = 4; - // Attributes of the volume to publish. + // volumeAttributes of the volume to publish. // +optional map volumeAttributes = 5; - // ControllerPublishSecretRef is a reference to the secret object containing + // controllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. // This field is optional, and may be empty if no secret is required. If the @@ -203,7 +225,7 @@ message CSIPersistentVolumeSource { // +optional optional SecretReference controllerPublishSecretRef = 6; - // NodeStageSecretRef is a reference to the secret object containing sensitive + // nodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. // This field is optional, and may be empty if no secret is required. If the @@ -211,7 +233,7 @@ message CSIPersistentVolumeSource { // +optional optional SecretReference nodeStageSecretRef = 7; - // NodePublishSecretRef is a reference to the secret object containing + // nodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. // This field is optional, and may be empty if no secret is required. If the @@ -219,39 +241,46 @@ message CSIPersistentVolumeSource { // +optional optional SecretReference nodePublishSecretRef = 8; - // ControllerExpandSecretRef is a reference to the secret object containing + // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference controllerExpandSecretRef = 9; + + // nodeExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodeExpandVolume call. + // This field is optional, may be omitted if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + optional SecretReference nodeExpandSecretRef = 10; } // Represents a source location of a volume to mount, managed by an external CSI driver message CSIVolumeSource { - // Driver is the name of the CSI driver that handles this volume. + // driver is the name of the CSI driver that handles this volume. // Consult with your admin for the correct name as registered in the cluster. optional string driver = 1; - // Specifies a read-only configuration for the volume. + // readOnly specifies a read-only configuration for the volume. // Defaults to false (read/write). 
// +optional optional bool readOnly = 2; - // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // fsType to mount. Ex. "ext4", "xfs", "ntfs". // If not provided, the empty value is passed to the associated CSI driver // which will determine the default filesystem to apply. // +optional optional string fsType = 3; - // VolumeAttributes stores driver-specific properties that are passed to the CSI + // volumeAttributes stores driver-specific properties that are passed to the CSI // driver. Consult your driver's documentation for supported values. // +optional map volumeAttributes = 4; - // NodePublishSecretRef is a reference to the secret object containing + // nodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. // This field is optional, and may be empty if no secret is required. If the @@ -264,40 +293,43 @@ message CSIVolumeSource { message Capabilities { // Added capabilities // +optional + // +listType=atomic repeated string add = 1; // Removed capabilities // +optional + // +listType=atomic repeated string drop = 2; } // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSPersistentVolumeSource { - // Required: Monitors is a collection of Ceph monitors + // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional optional string path = 2; - // Optional: User is the rados user name, default is admin + // user is Optional: User is the rados user name, default is admin // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; - // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional SecretReference secretRef = 5; - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional @@ -307,30 +339,31 @@ message CephFSPersistentVolumeSource { // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. 
message CephFSVolumeSource { - // Required: Monitors is a collection of Ceph monitors + // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional optional string path = 2; - // Optional: User is the rados user name, default is admin + // user is optional: User is the rados user name, default is admin // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; - // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 5; - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional @@ -342,24 +375,24 @@ message CephFSVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderPersistentVolumeSource { - // volume id used to identify the volume in cinder. + // volumeID used to identify the volume in cinder. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; - // Filesystem type to mount. + // fsType Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; - // Optional: points to a secret object containing parameters used to connect + // secretRef is Optional: points to a secret object containing parameters used to connect // to OpenStack. // +optional optional SecretReference secretRef = 4; @@ -370,24 +403,24 @@ message CinderPersistentVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderVolumeSource { - // volume id used to identify the volume in cinder. + // volumeID used to identify the volume in cinder. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md optional string volumeID = 1; - // Filesystem type to mount. + // fsType is the filesystem type to mount. 
// Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional string fsType = 2; - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; - // Optional: points to a secret object containing parameters used to connect + // secretRef is optional: points to a secret object containing parameters used to connect // to OpenStack. // +optional optional LocalObjectReference secretRef = 4; @@ -402,6 +435,40 @@ message ClientIPConfig { optional int32 timeoutSeconds = 1; } +// ClusterTrustBundleProjection describes how to select a set of +// ClusterTrustBundle objects and project their contents into the pod +// filesystem. +message ClusterTrustBundleProjection { + // Select a single ClusterTrustBundle by object name. Mutually-exclusive + // with signerName and labelSelector. + // +optional + optional string name = 1; + + // Select all ClusterTrustBundles that match this signer name. + // Mutually-exclusive with name. The contents of all selected + // ClusterTrustBundles will be unified and deduplicated. + // +optional + optional string signerName = 2; + + // Select all ClusterTrustBundles that match this label selector. Only has + // effect if signerName is set. Mutually-exclusive with name. If unset, + // interpreted as "match nothing". If set but empty, interpreted as "match + // everything". + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; + + // If true, don't block pod startup if the referenced ClusterTrustBundle(s) + // aren't available. If using name, then the named ClusterTrustBundle is + // allowed not to exist. If using signerName, then the combination of + // signerName and labelSelector is allowed to match zero + // ClusterTrustBundles. + // +optional + optional bool optional = 5; + + // Relative path from the volume root to write the bundle. + optional string path = 4; +} + // Information about the condition of a component. message ComponentCondition { // Type of condition for a component. @@ -429,12 +496,14 @@ message ComponentStatus { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // List of component conditions observed // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ComponentCondition conditions = 2; } @@ -444,7 +513,7 @@ message ComponentStatusList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ComponentStatus objects. repeated ComponentStatus items = 2; @@ -455,7 +524,7 @@ message ConfigMap { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the ConfigMap cannot // be updated (only object metadata can be modified). @@ -515,7 +584,7 @@ message ConfigMapKeySelector { message ConfigMapList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ConfigMaps. repeated ConfigMap items = 2; @@ -557,7 +626,7 @@ message ConfigMapNodeConfigSource { message ConfigMapProjection { optional LocalObjectReference localObjectReference = 1; - // If unspecified, each key-value pair in the Data field of the referenced + // items if unspecified, each key-value pair in the Data field of the referenced // ConfigMap will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -565,9 +634,10 @@ message ConfigMapProjection { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; - // Specify whether the ConfigMap or its keys must be defined + // optional specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -581,7 +651,7 @@ message ConfigMapProjection { message ConfigMapVolumeSource { optional LocalObjectReference localObjectReference = 1; - // If unspecified, each key-value pair in the Data field of the referenced + // items if unspecified, each key-value pair in the Data field of the referenced // ConfigMap will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -589,9 +659,10 @@ message ConfigMapVolumeSource { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; - // Optional: mode bits used to set permissions on created files by default. + // defaultMode is optional: mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. // Defaults to 0644. @@ -601,7 +672,7 @@ message ConfigMapVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the ConfigMap or its keys must be defined + // optional specify whether the ConfigMap or its keys must be defined // +optional optional bool optional = 4; } @@ -613,7 +684,7 @@ message Container { // Cannot be updated. optional string name = 1; - // Docker image name. + // Container image name. // More info: https://kubernetes.io/docs/concepts/containers/images // This field is optional to allow higher level config management to default or override // container images in workload controllers like Deployments and StatefulSets. 
@@ -621,7 +692,7 @@ message Container { optional string image = 2; // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. + // The container image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -629,10 +700,11 @@ message Container { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string command = 3; // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. + // The container image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -640,6 +712,7 @@ message Container { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string args = 4; // Container's working directory. @@ -649,12 +722,12 @@ message Container { // +optional optional string workingDir = 5; - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here + // List of ports to expose from the container. Not specifying a port here // DOES NOT prevent that port from being exposed. Any port which is // listening on the default "0.0.0.0" address inside a container will be // accessible from the network. + // Modifying this array with strategic merge patch may corrupt the data. + // For more information See https://github.com/kubernetes/kubernetes/issues/108255. // Cannot be updated. // +optional // +patchMergeKey=containerPort @@ -671,6 +744,7 @@ message Container { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic repeated EnvFromSource envFrom = 19; // List of environment variables to set in the container. @@ -678,6 +752,8 @@ message Container { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated EnvVar env = 7; // Compute Resources required by this container. @@ -686,16 +762,45 @@ message Container { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + + // RestartPolicy defines the restart behavior of individual containers in a pod. + // This field may only be set for init containers, and the only allowed value is "Always". + // For non-init containers or when this field is not specified, + // the restart behavior is defined by the Pod's restart policy and the container type. 
+ // Setting the RestartPolicy as "Always" for the init container will have the following effect: + // this init container will be continually restarted on + // exit until all regular containers have terminated. Once all regular + // containers have completed, all init containers with restartPolicy "Always" + // will be shut down. This lifecycle differs from normal init containers and + // is often referred to as a "sidecar" container. Although this init + // container still starts in the init container sequence, it does not wait + // for the container to complete before proceeding to the next init + // container. Instead, the next init container starts immediately after this + // init container is started, or after any startupProbe has successfully + // completed. + // +featureGate=SidecarContainers + // +optional + optional string restartPolicy = 24; + // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. // +patchMergeKey=devicePath // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional repeated VolumeDevice volumeDevices = 21; @@ -787,8 +892,9 @@ message Container { // Describe a container image message ContainerImage { // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"] // +optional + // +listType=atomic repeated string names = 1; // The size of the image in bytes. @@ -826,6 +932,17 @@ message ContainerPort { optional string hostIP = 5; } +// ContainerResizePolicy represents resource resize policy for the container. +message ContainerResizePolicy { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + optional string resourceName = 1; + + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + optional string restartPolicy = 2; +} + // ContainerState holds a possible state of container. // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. @@ -847,7 +964,7 @@ message ContainerState { message ContainerStateRunning { // Time at which the container was last (re-)started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; } // ContainerStateTerminated is a terminated state of a container. @@ -869,13 +986,13 @@ message ContainerStateTerminated { // Time at which previous execution of the container started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; // Time at which the container last terminated // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; - // Container's ID in the format 'docker://' + // Container's ID in the format '://' // +optional optional string containerID = 7; } @@ -893,41 +1010,108 @@ message ContainerStateWaiting { // ContainerStatus contains details for the current status of this container. 
message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. optional string name = 1; - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional optional ContainerState state = 2; - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional optional ContainerState lastState = 3; - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. optional bool ready = 4; - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. optional int32 restartCount = 5; - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. optional string image = 6; - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. optional string imageID = 7; - // Container's ID in the format 'docker://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional optional string containerID = 8; - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. // +optional optional bool started = 9; + + // AllocatedResources represents the compute resources allocated for this container by the + // node. 
Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + map allocatedResources = 10; + + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + optional ResourceRequirements resources = 11; + + // Status of volume mounts. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath + // +featureGate=RecursiveReadOnlyMounts + repeated VolumeMountStatus volumeMounts = 12; + + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + optional ContainerUser user = 13; + + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. + // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + repeated ResourceStatus allocatedResourcesStatus = 14; +} + +// ContainerUser represents user identity information +message ContainerUser { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. + // +optional + optional LinuxContainerUser linux = 1; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -942,6 +1126,7 @@ message DaemonEndpoint { message DownwardAPIProjection { // Items is a list of DownwardAPIVolume file // +optional + // +listType=atomic repeated DownwardAPIVolumeFile items = 1; } @@ -950,7 +1135,7 @@ message DownwardAPIVolumeFile { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' optional string path = 1; - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. // +optional optional ObjectFieldSelector fieldRef = 2; @@ -974,6 +1159,7 @@ message DownwardAPIVolumeFile { message DownwardAPIVolumeSource { // Items is a list of downward API volume file // +optional + // +listType=atomic repeated DownwardAPIVolumeFile items = 1; // Optional: mode bits to use on created files by default. Must be a @@ -991,32 +1177,29 @@ message DownwardAPIVolumeSource { // Represents an empty directory for a pod. // Empty directory volumes support ownership management and SELinux relabeling. message EmptyDirVolumeSource { - // What type of storage medium should back this directory. + // medium represents what type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional string medium = 1; - // Total amount of local storage required for this EmptyDir volume. + // sizeLimit is the total amount of local storage required for this EmptyDir volume. // The size limit is also applicable for memory medium. 
// The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } // EndpointAddress is a tuple that describes single IP address. // +structType=atomic message EndpointAddress { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). optional string ip = 1; // The Hostname of this endpoint @@ -1052,10 +1235,19 @@ message EndpointPort { optional string protocol = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -1078,16 +1270,19 @@ message EndpointSubset { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. // +optional + // +listType=atomic repeated EndpointAddress addresses = 1; // IP addresses which offer the related ports but are not currently marked as ready // because they have not yet finished starting, have recently failed a readiness check, // or have recently failed a liveness check. // +optional + // +listType=atomic repeated EndpointAddress notReadyAddresses = 2; // Port numbers available on the related IP addresses. // +optional + // +listType=atomic repeated EndpointPort ports = 3; } @@ -1108,7 +1303,7 @@ message Endpoints { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, @@ -1118,6 +1313,7 @@ message Endpoints { // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. // +optional + // +listType=atomic repeated EndpointSubset subsets = 2; } @@ -1126,7 +1322,7 @@ message EndpointsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of endpoints. repeated Endpoints items = 2; @@ -1198,8 +1394,6 @@ message EnvVarSource { // // To add an ephemeral container, use the ephemeralcontainers subresource of an existing // Pod. Ephemeral containers may not be removed or restarted. -// -// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. message EphemeralContainer { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. Fields in common with Container are in the @@ -1226,12 +1420,12 @@ message EphemeralContainerCommon { // This name must be unique among all containers, init containers and ephemeral containers. optional string name = 1; - // Docker image name. + // Container image name. // More info: https://kubernetes.io/docs/concepts/containers/images optional string image = 2; // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. + // The image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -1239,10 +1433,11 @@ message EphemeralContainerCommon { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string command = 3; // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. + // The image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -1250,6 +1445,7 @@ message EphemeralContainerCommon { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic repeated string args = 4; // Container's working directory. 
@@ -1275,6 +1471,7 @@ message EphemeralContainerCommon { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic repeated EnvFromSource envFrom = 19; // List of environment variables to set in the container. @@ -1282,6 +1479,8 @@ message EphemeralContainerCommon { // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated EnvVar env = 7; // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources @@ -1289,16 +1488,34 @@ message EphemeralContainerCommon { // +optional optional ResourceRequirements resources = 8; + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + repeated ContainerResizePolicy resizePolicy = 23; + + // Restart policy for the container to manage the restart behavior of each + // container within a pod. + // This may only be set for init containers. You cannot set this field on + // ephemeral containers. + // +featureGate=SidecarContainers + // +optional + optional string restartPolicy = 24; + // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. // +patchMergeKey=devicePath // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional repeated VolumeDevice volumeDevices = 21; @@ -1408,7 +1625,7 @@ message EphemeralVolumeSource { message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. optional ObjectReference involvedObject = 2; @@ -1430,11 +1647,11 @@ message Event { // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; // The time at which the most recent occurrence of this event was recorded. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; // The number of times this event has occurred. // +optional @@ -1446,7 +1663,7 @@ message Event { // Time when this Event was first observed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; // Data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -1474,7 +1691,7 @@ message EventList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of events repeated Event items = 2; @@ -1487,7 +1704,7 @@ message EventSeries { optional int32 count = 1; // Time of the last occurrence observed - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } // EventSource contains information for an event. @@ -1509,6 +1726,7 @@ message ExecAction { // a shell, you need to explicitly call out to that shell. // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. // +optional + // +listType=atomic repeated string command = 1; } @@ -1516,45 +1734,47 @@ message ExecAction { // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. message FCVolumeSource { - // Optional: FC target worldwide names (WWNs) + // targetWWNs is Optional: FC target worldwide names (WWNs) // +optional + // +listType=atomic repeated string targetWWNs = 1; - // Optional: FC target lun number + // lun is Optional: FC target lun number // +optional optional int32 lun = 2; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional optional string fsType = 3; - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 4; - // Optional: FC volume world wide identifiers (wwids) + // wwids Optional: FC volume world wide identifiers (wwids) // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. // +optional + // +listType=atomic repeated string wwids = 5; } // FlexPersistentVolumeSource represents a generic persistent volume resource that is // provisioned/attached using an exec based plugin. message FlexPersistentVolumeSource { - // Driver is the name of the driver to use for this volume. + // driver is the name of the driver to use for this volume. optional string driver = 1; - // Filesystem type to mount. + // fsType is the Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. // +optional optional string fsType = 2; - // Optional: SecretRef is reference to the secret object containing + // secretRef is Optional: SecretRef is reference to the secret object containing // sensitive information to pass to the plugin scripts. This may be // empty if no secret object is specified. If the secret object // contains more than one secret, all secrets are passed to the plugin @@ -1562,12 +1782,12 @@ message FlexPersistentVolumeSource { // +optional optional SecretReference secretRef = 3; - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
// +optional optional bool readOnly = 4; - // Optional: Extra command options if any. + // options is Optional: this field holds extra command options if any. // +optional map options = 5; } @@ -1575,16 +1795,16 @@ message FlexPersistentVolumeSource { // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. message FlexVolumeSource { - // Driver is the name of the driver to use for this volume. + // driver is the name of the driver to use for this volume. optional string driver = 1; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. // +optional optional string fsType = 2; - // Optional: SecretRef is reference to the secret object containing + // secretRef is Optional: secretRef is reference to the secret object containing // sensitive information to pass to the plugin scripts. This may be // empty if no secret object is specified. If the secret object // contains more than one secret, all secrets are passed to the plugin @@ -1592,12 +1812,12 @@ message FlexVolumeSource { // +optional optional LocalObjectReference secretRef = 3; - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 4; - // Optional: Extra command options if any. + // options is Optional: this field holds extra command options if any. // +optional map options = 5; } @@ -1606,12 +1826,12 @@ message FlexVolumeSource { // One and only one of datasetName and datasetUUID should be set. // Flocker volumes do not support ownership management or SELinux relabeling. message FlockerVolumeSource { - // Name of the dataset stored as metadata -> name on the dataset for Flocker + // datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker // should be considered as deprecated // +optional optional string datasetName = 1; - // UUID of the dataset. This is unique identifier of a Flocker dataset + // datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset // +optional optional string datasetUUID = 2; } @@ -1623,11 +1843,11 @@ message FlockerVolumeSource { // can only be mounted as read/write once or read-only many times. GCE // PDs support ownership management and SELinux relabeling. message GCEPersistentDiskVolumeSource { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk optional string pdName = 1; - // Filesystem type of the volume that you want to mount. + // fsType is filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk @@ -1635,7 +1855,7 @@ message GCEPersistentDiskVolumeSource { // +optional optional string fsType = 2; - // The partition in the volume that you want to mount. + // partition is the partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. 
// Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). @@ -1643,7 +1863,7 @@ message GCEPersistentDiskVolumeSource { // +optional optional int32 partition = 3; - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional @@ -1671,14 +1891,14 @@ message GRPCAction { // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. message GitRepoVolumeSource { - // Repository URL + // repository is the URL optional string repository = 1; - // Commit hash for the specified revision. + // revision is the commit hash for the specified revision. // +optional optional string revision = 2; - // Target directory name. + // directory is the target directory name. // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the // git repository. Otherwise, if specified, the volume will contain the git repository in // the subdirectory with the given name. @@ -1689,21 +1909,21 @@ message GitRepoVolumeSource { // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsPersistentVolumeSource { - // EndpointsName is the endpoint name that details Glusterfs topology. + // endpoints is the endpoint name that details Glusterfs topology. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; - // Path is the Glusterfs volume path. + // path is the Glusterfs volume path. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // readOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; - // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // endpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional @@ -1713,15 +1933,15 @@ message GlusterfsPersistentVolumeSource { // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { - // EndpointsName is the endpoint name that details Glusterfs topology. + // endpoints is the endpoint name that details Glusterfs topology. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; - // Path is the Glusterfs volume path. + // path is the Glusterfs volume path. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod optional string path = 2; - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // readOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. 
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional @@ -1737,7 +1957,7 @@ message HTTPGetAction { // Name or number of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // Host name to connect to, defaults to the pod IP. You probably want to set // "Host" in httpHeaders instead. @@ -1751,12 +1971,14 @@ message HTTPGetAction { // Custom headers to set in the request. HTTP allows repeated headers. // +optional + // +listType=atomic repeated HTTPHeader httpHeaders = 5; } // HTTPHeader describes a custom header to be used in HTTP probes message HTTPHeader { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. optional string name = 1; // The header field value @@ -1767,21 +1989,30 @@ message HTTPHeader { // pod's hosts file. message HostAlias { // IP address of the host file entry. + // +required optional string ip = 1; // Hostnames for the above IP address. + // +listType=atomic repeated string hostnames = 2; } +// HostIP represents a single IP address allocated to the host. +message HostIP { + // IP is the IP address assigned to the host + // +required + optional string ip = 1; +} + // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. message HostPathVolumeSource { - // Path of the directory on the host. + // path of the directory on the host. // If the path is a symlink, it will follow the link to the real path. // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath optional string path = 1; - // Type for HostPath Volume + // type for HostPath Volume // Defaults to "" // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // +optional @@ -1792,22 +2023,23 @@ message HostPathVolumeSource { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. message ISCSIPersistentVolumeSource { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). optional string targetPortal = 1; - // Target iSCSI Qualified Name. + // iqn is Target iSCSI Qualified Name. optional string iqn = 2; - // iSCSI Target Lun number. + // lun is iSCSI Target Lun number. optional int32 lun = 3; - // iSCSI Interface Name that uses an iSCSI transport. + // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" optional string iscsiInterface = 4; - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi @@ -1815,29 +2047,30 @@ message ISCSIPersistentVolumeSource { // +optional optional string fsType = 5; - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. 
// Defaults to false. // +optional optional bool readOnly = 6; - // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional + // +listType=atomic repeated string portals = 7; - // whether support iSCSI Discovery CHAP authentication + // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication // +optional optional bool chapAuthDiscovery = 8; - // whether support iSCSI Session CHAP authentication + // chapAuthSession defines whether support iSCSI Session CHAP authentication // +optional optional bool chapAuthSession = 11; - // CHAP Secret for iSCSI target and initiator authentication + // secretRef is the CHAP Secret for iSCSI target and initiator authentication // +optional optional SecretReference secretRef = 10; - // Custom iSCSI Initiator Name. + // initiatorName is the custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional @@ -1848,22 +2081,23 @@ message ISCSIPersistentVolumeSource { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. message ISCSIVolumeSource { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). optional string targetPortal = 1; - // Target iSCSI Qualified Name. + // iqn is the target iSCSI Qualified Name. optional string iqn = 2; - // iSCSI Target Lun number. + // lun represents iSCSI Target Lun number. optional int32 lun = 3; - // iSCSI Interface Name that uses an iSCSI transport. + // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" optional string iscsiInterface = 4; - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi @@ -1871,47 +2105,68 @@ message ISCSIVolumeSource { // +optional optional string fsType = 5; - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // +optional optional bool readOnly = 6; - // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + // portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). 
// +optional + // +listType=atomic repeated string portals = 7; - // whether support iSCSI Discovery CHAP authentication + // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication // +optional optional bool chapAuthDiscovery = 8; - // whether support iSCSI Session CHAP authentication + // chapAuthSession defines whether support iSCSI Session CHAP authentication // +optional optional bool chapAuthSession = 11; - // CHAP Secret for iSCSI target and initiator authentication + // secretRef is the CHAP Secret for iSCSI target and initiator authentication // +optional optional LocalObjectReference secretRef = 10; - // Custom iSCSI Initiator Name. + // initiatorName is the custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional optional string initiatorName = 12; } +// ImageVolumeSource represents a image volume resource. +message ImageVolumeSource { + // Required: Image or artifact reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + optional string reference = 1; + + // Policy for pulling OCI objects. Possible values are: + // Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + optional string pullPolicy = 2; +} + // Maps a string key to a path within a volume. message KeyToPath { - // The key to project. + // key is the key to project. optional string key = 1; - // The relative path of the file to map the key to. + // path is the relative path of the file to map the key to. // May not be an absolute path. // May not contain the path element '..'. // May not start with the string '..'. optional string path = 2; - // Optional: mode bits used to set permissions on this file. + // mode is Optional: mode bits used to set permissions on this file. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. // If not specified, the volume defaultMode will be used. @@ -1961,6 +2216,11 @@ message LifecycleHandler { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional optional TCPSocketAction tcpSocket = 3; + + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + optional SleepAction sleep = 4; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. @@ -1968,7 +2228,7 @@ message LimitRange { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -1983,23 +2243,23 @@ message LimitRangeItem { // Max usage constraints on this kind by resource name. // +optional - map max = 2; + map max = 2; // Min usage constraints on this kind by resource name. // +optional - map min = 3; + map min = 3; // Default resource requirement limit value by resource name if resource limit is omitted. // +optional - map default = 4; + map default = 4; // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. // +optional - map defaultRequest = 5; + map defaultRequest = 5; // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. // +optional - map maxLimitRequestRatio = 6; + map maxLimitRequestRatio = 6; } // LimitRangeList is a list of LimitRange items. @@ -2007,7 +2267,7 @@ message LimitRangeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of LimitRange objects. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -2017,18 +2277,33 @@ message LimitRangeList { // LimitRangeSpec defines a min/max usage limit for resources that match on kind. message LimitRangeSpec { // Limits is the list of LimitRangeItem objects that are enforced. + // +listType=atomic repeated LimitRangeItem limits = 1; } +// LinuxContainerUser represents user identity information in Linux containers +message LinuxContainerUser { + // UID is the primary uid initially attached to the first process in the container + optional int64 uid = 1; + + // GID is the primary gid initially attached to the first process in the container + optional int64 gid = 2; + + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + repeated int64 supplementalGroups = 3; +} + // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // LoadBalancerIngress represents the status of a load-balancer ingress point: @@ -2044,6 +2319,15 @@ message LoadBalancerIngress { // +optional optional string hostname = 2; + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. 
+ // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + optional string ipMode = 3; + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -2056,6 +2340,7 @@ message LoadBalancerStatus { // Ingress is a list containing ingress points for the load-balancer. // Traffic intended for the service should be sent to these ingress points. // +optional + // +listType=atomic repeated LoadBalancerIngress ingress = 1; } @@ -2064,19 +2349,25 @@ message LoadBalancerStatus { // +structType=atomic message LocalObjectReference { // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // This field is effectively required, but due to backwards compatibility is + // allowed to be empty. Instances of this type with an empty value here are + // almost certainly wrong. // TODO: Add other useful fields. apiVersion, kind, uid? + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional + // +default="" + // +kubebuilder:default="" + // TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. optional string name = 1; } // Local represents directly-attached storage with node affinity (Beta feature) message LocalVolumeSource { - // The full path to the volume on the node. + // path of the full path to the volume on the node. // It can be either a directory or block device (disk, partition, ...). optional string path = 1; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // It applies only when the Path is a block device. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified. @@ -2084,19 +2375,36 @@ message LocalVolumeSource { optional string fsType = 2; } +// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation +message ModifyVolumeStatus { + // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled + optional string targetVolumeAttributesClassName = 1; + + // status is the status of the ControllerModifyVolume operation. It can be in any of following states: + // - Pending + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing. + // - InProgress + // InProgress indicates that the volume is being modified. + // - Infeasible + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified. + // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. + optional string status = 2; +} + // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. message NFSVolumeSource { - // Server is the hostname or IP address of the NFS server. 
+ // server is the hostname or IP address of the NFS server. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs optional string server = 1; - // Path that is exported by the NFS server. + // path that is exported by the NFS server. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs optional string path = 2; - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. + // readOnly here will force the NFS export to be mounted with read-only permissions. // Defaults to false. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional @@ -2109,7 +2417,7 @@ message Namespace { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2131,7 +2439,7 @@ message NamespaceCondition { optional string status = 2; // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // +optional optional string reason = 5; @@ -2145,7 +2453,7 @@ message NamespaceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ @@ -2157,6 +2465,7 @@ message NamespaceSpec { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional + // +listType=atomic repeated string finalizers = 1; } @@ -2171,6 +2480,8 @@ message NamespaceStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NamespaceCondition conditions = 2; } @@ -2180,7 +2491,7 @@ message Node { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2224,6 +2535,7 @@ message NodeAffinity { // "weight" to the sum if the node matches the corresponding matchExpressions; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; } @@ -2237,11 +2549,11 @@ message NodeCondition { // Last time we got an update on a given condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; // Last time the condition transit from one status to another. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -2316,12 +2628,21 @@ message NodeDaemonEndpoints { optional DaemonEndpoint kubeletEndpoint = 1; } +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +message NodeFeatures { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + optional bool supplementalGroupsPolicy = 1; +} + // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of nodes repeated Node items = 2; @@ -2334,11 +2655,29 @@ message NodeProxyOptions { optional string path = 1; } -// NodeResources is an object for conveying resource information about a node. -// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details. -message NodeResources { - // Capacity represents the available resources of a node - map capacity = 1; +// NodeRuntimeHandler is a set of runtime handler information. +message NodeRuntimeHandler { + // Runtime handler name. + // Empty for the default runtime handler. + // +optional + optional string name = 1; + + // Supported features. + // +optional + optional NodeRuntimeHandlerFeatures features = 2; +} + +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. +message NodeRuntimeHandlerFeatures { + // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. + // +featureGate=RecursiveReadOnlyMounts + // +optional + optional bool recursiveReadOnlyMounts = 1; + + // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes. + // +featureGate=UserNamespacesSupport + // +optional + optional bool userNamespaces = 2; } // A node selector represents the union of the results of one or more label queries @@ -2347,6 +2686,7 @@ message NodeResources { // +structType=atomic message NodeSelector { // Required. A list of node selector terms. The terms are ORed. + // +listType=atomic repeated NodeSelectorTerm nodeSelectorTerms = 1; } @@ -2366,6 +2706,7 @@ message NodeSelectorRequirement { // array must have a single element, which will be interpreted as an integer. // This array is replaced during a strategic merge patch. // +optional + // +listType=atomic repeated string values = 3; } @@ -2376,10 +2717,12 @@ message NodeSelectorRequirement { message NodeSelectorTerm { // A list of node selector requirements by node's labels. // +optional + // +listType=atomic repeated NodeSelectorRequirement matchExpressions = 1; // A list of node selector requirements by node's fields. // +optional + // +listType=atomic repeated NodeSelectorRequirement matchFields = 2; } @@ -2394,6 +2737,7 @@ message NodeSpec { // each of IPv4 and IPv6. 
// +optional // +patchStrategy=merge + // +listType=set repeated string podCIDRs = 7; // ID of the node assigned by the cloud provider in the format: :// @@ -2407,11 +2751,10 @@ message NodeSpec { // If specified, the node's taints. // +optional + // +listType=atomic repeated Taint taints = 5; - // Deprecated. If specified, the source of the node's configuration. - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. - // This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration + // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed. // +optional optional NodeConfigSource configSource = 6; @@ -2424,14 +2767,14 @@ message NodeSpec { // NodeStatus is information about the current status of a node. message NodeStatus { // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity // +optional - map capacity = 1; + map capacity = 1; // Allocatable represents the resources of a node that are available for scheduling. // Defaults to Capacity. // +optional - map allocatable = 2; + map allocatable = 2; // NodePhase is the recently observed lifecycle phase of the node. // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase @@ -2444,6 +2787,8 @@ message NodeStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NodeCondition conditions = 4; // List of addresses reachable to the node. @@ -2451,10 +2796,16 @@ message NodeStatus { // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead - // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. + // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NodeAddress addresses = 5; // Endpoints of daemons running on the Node. @@ -2468,19 +2819,34 @@ message NodeStatus { // List of container images on this node // +optional + // +listType=atomic repeated ContainerImage images = 8; // List of attachable volumes in use (mounted) by the node. // +optional + // +listType=atomic repeated string volumesInUse = 9; // List of volumes that are attached to the node. // +optional + // +listType=atomic repeated AttachedVolume volumesAttached = 10; // Status of the config assigned to the node via the dynamic Kubelet config feature. // +optional optional NodeConfigStatus config = 11; + + // The available runtime handlers. + // +featureGate=RecursiveReadOnlyMounts + // +featureGate=UserNamespacesSupport + // +optional + // +listType=atomic + repeated NodeRuntimeHandler runtimeHandlers = 12; + + // Features describes the set of features implemented by the CRI implementation. 
+ // +featureGate=SupplementalGroupsPolicy + // +optional + optional NodeFeatures features = 13; } // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. @@ -2504,13 +2870,13 @@ message NodeSystemInfo { // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). optional string osImage = 5; - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + // ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2). optional string containerRuntimeVersion = 6; // Kubelet Version reported by the node. optional string kubeletVersion = 7; - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. optional string kubeProxyVersion = 8; // The Operating System reported by the node @@ -2598,15 +2964,15 @@ message PersistentVolume { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines a specification of a persistent volume owned by the cluster. + // spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional optional PersistentVolumeSpec spec = 2; - // Status represents the current information/status for the persistent volume. + // status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes @@ -2619,41 +2985,41 @@ message PersistentVolumeClaim { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the desired characteristics of a volume requested by a pod author. + // spec defines the desired characteristics of a volume requested by a pod author. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional optional PersistentVolumeClaimSpec spec = 2; - // Status represents the current information/status of a persistent volume claim. + // status represents the current information/status of a persistent volume claim. // Read-only. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional optional PersistentVolumeClaimStatus status = 3; } -// PersistentVolumeClaimCondition contails details about state of pvc +// PersistentVolumeClaimCondition contains details about state of pvc message PersistentVolumeClaimCondition { optional string type = 1; optional string status = 2; - // Last time we probed the condition. + // lastProbeTime is the time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; - // Last time the condition transitioned from one status to another. + // lastTransitionTime is the time the condition transitioned from one status to another. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; - // Unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // reason is a unique, this should be a short, machine understandable string that gives the reason + // for condition's last transition. If it reports "Resizing" that means the underlying // persistent volume is being resized. // +optional optional string reason = 5; - // Human-readable message indicating details about last transition. + // message is the human-readable message indicating details about last transition. // +optional optional string message = 6; } @@ -2663,9 +3029,9 @@ message PersistentVolumeClaimList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // A list of persistent volume claims. + // items is a list of persistent volume claims. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims repeated PersistentVolumeClaim items = 2; } @@ -2673,28 +3039,29 @@ message PersistentVolumeClaimList { // PersistentVolumeClaimSpec describes the common attributes of storage devices // and allows a Source for provider-specific attributes message PersistentVolumeClaimSpec { - // AccessModes contains the desired access modes the volume should have. + // accessModes contains the desired access modes the volume should have. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic repeated string accessModes = 1; - // A label query over volumes to consider for binding. + // selector is a label query over volumes to consider for binding. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - // Resources represents the minimum resources the volume should have. + // resources represents the minimum resources the volume should have. // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements // that are lower than previous value but must still be higher than capacity recorded in the // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - optional ResourceRequirements resources = 2; + optional VolumeResourceRequirements resources = 2; - // VolumeName is the binding reference to the PersistentVolume backing this claim. + // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional optional string volumeName = 3; - // Name of the StorageClass required by the claim. + // storageClassName is the name of the StorageClass required by the claim. 
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional optional string storageClassName = 5; @@ -2704,78 +3071,164 @@ message PersistentVolumeClaimSpec { // +optional optional string volumeMode = 6; - // This field can be used to specify either: + // dataSource field can be used to specify either: // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the AnyVolumeDataSource feature gate is enabled, this field will always have - // the same contents as the DataSourceRef field. + // When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + // and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + // If the namespace is specified, then dataSourceRef will not be copied to dataSource. // +optional optional TypedLocalObjectReference dataSource = 7; - // Specifies the object from which to populate the volume with data, if a non-empty - // volume is desired. This may be any local object from a non-empty API group (non + // dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + // volume is desired. This may be any object from a non-empty API group (non // core object) or a PersistentVolumeClaim object. // When this field is specified, volume binding will only succeed if the type of // the specified object matches some installed volume populator or dynamic // provisioner. - // This field will replace the functionality of the DataSource field and as such + // This field will replace the functionality of the dataSource field and as such // if both fields are non-empty, they must have the same value. For backwards - // compatibility, both fields (DataSource and DataSourceRef) will be set to the same + // compatibility, when namespace isn't specified in dataSourceRef, + // both fields (dataSource and dataSourceRef) will be set to the same // value automatically if one of them is empty and the other is non-empty. - // There are two important differences between DataSource and DataSourceRef: - // * While DataSource only allows two specific types of objects, DataSourceRef + // When namespace is specified in dataSourceRef, + // dataSource isn't set to the same value and must be empty. + // There are three important differences between dataSource and dataSourceRef: + // * While dataSource only allows two specific types of objects, dataSourceRef // allows any non-core object, as well as PersistentVolumeClaim objects. - // * While DataSource ignores disallowed values (dropping them), DataSourceRef + // * While dataSource ignores disallowed values (dropping them), dataSourceRef // preserves all values, and generates an error if a disallowed value is // specified. - // (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - // +optional - optional TypedLocalObjectReference dataSourceRef = 8; + // * While dataSource only allows local objects, dataSourceRef allows objects + // in any namespaces. + // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ // +optional + optional TypedObjectReference dataSourceRef = 8; + + // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + // If specified, the CSI driver will create or update the volume with the attributes defined + // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + // will be set by the persistentvolume controller if it exists. + // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + // exists. + // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + // +featureGate=VolumeAttributesClass + // +optional + optional string volumeAttributesClassName = 9; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. message PersistentVolumeClaimStatus { - // Phase represents the current phase of PersistentVolumeClaim. + // phase represents the current phase of PersistentVolumeClaim. // +optional optional string phase = 1; - // AccessModes contains the actual access modes the volume backing the PVC has. + // accessModes contains the actual access modes the volume backing the PVC has. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic repeated string accessModes = 2; - // Represents the actual resources of the underlying volume. + // capacity represents the actual resources of the underlying volume. // +optional - map capacity = 3; + map capacity = 3; - // Current Condition of persistent volume claim. If underlying persistent volume is being - // resized then the Condition will be set to 'ResizeStarted'. + // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being + // resized then the Condition will be set to 'Resizing'. // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated PersistentVolumeClaimCondition conditions = 4; - // The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may - // be larger than the actual capacity when a volume expansion operation is requested. + // allocatedResources tracks the resources allocated to a PVC including its capacity. + // Key names follow standard Kubernetes label syntax. Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // Capacity reported here may be larger than the actual capacity when a volume expansion operation + // is requested. // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. 
// If a volume expansion capacity request is lowered, allocatedResources is only // lowered if there are no expansion operations in progress and if the actual volume capacity // is equal or lower than the requested capacity. + // + // A controller that receives PVC update with previously unknown resourceName + // should ignore the update for the purpose it was designed. For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure // +optional - map allocatedResources = 5; + map allocatedResources = 5; - // ResizeStatus stores status of resize operation. - // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty - // string by resize controller or kubelet. + // allocatedResourceStatuses stores status of resource being resized for the given PVC. + // Key names follow standard Kubernetes label syntax. Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // ClaimResourceStatus can be in any of following states: + // - ControllerResizeInProgress: + // State set when resize controller starts resizing the volume in control-plane. + // - ControllerResizeFailed: + // State set when resize has failed in resize controller with a terminal error. + // - NodeResizePending: + // State set when resize controller has finished resizing the volume but further resizing of + // volume is needed on the node. + // - NodeResizeInProgress: + // State set when kubelet starts resizing the volume. + // - NodeResizeFailed: + // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set + // NodeResizeFailed. + // For example: if expanding a PVC for more capacity - this field can be one of the following states: + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed" + // When this field is not set, it means that no resize operation is in progress for the given PVC. + // + // A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus + // should ignore the update for the purpose it was designed. For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure + // +mapType=granular + // +optional + map allocatedResourceStatuses = 7; + + // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. 
+ // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). + // +featureGate=VolumeAttributesClass // +optional - optional string resizeStatus = 6; + optional string currentVolumeAttributesClassName = 8; + + // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. + // When this is unset, there is no ModifyVolume operation being attempted. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). + // +featureGate=VolumeAttributesClass + // +optional + optional ModifyVolumeStatus modifyVolumeStatus = 9; } // PersistentVolumeClaimTemplate is used to produce @@ -2786,7 +3239,7 @@ message PersistentVolumeClaimTemplate { // validation. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The specification for the PersistentVolumeClaim. The entire content is // copied unchanged into the PVC that gets created from this @@ -2800,11 +3253,11 @@ message PersistentVolumeClaimTemplate { // PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another // type of volume that is owned by someone else (the system). message PersistentVolumeClaimVolumeSource { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims optional string claimName = 1; - // Will force the ReadOnly setting in VolumeMounts. + // readOnly Will force the ReadOnly setting in VolumeMounts. // Default false. // +optional optional bool readOnly = 2; @@ -2815,9 +3268,9 @@ message PersistentVolumeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of persistent volumes. + // items is a list of persistent volumes. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes repeated PersistentVolume items = 2; } @@ -2825,19 +3278,19 @@ message PersistentVolumeList { // PersistentVolumeSource is similar to VolumeSource but meant for the // administrator who creates PVs. Exactly one of its members must be set. message PersistentVolumeSource { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; - // HostPath represents a directory on the host. + // hostPath represents a directory on the host. 
// Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. @@ -2845,113 +3298,115 @@ message PersistentVolumeSource { // +optional optional HostPathVolumeSource hostPath = 3; - // Glusterfs represents a Glusterfs volume that is attached to a host and + // glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsPersistentVolumeSource glusterfs = 4; - // NFS represents an NFS mount on the host. Provisioned by an admin. + // nfs represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional optional NFSVolumeSource nfs = 5; - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; - // ISCSI represents an ISCSI Disk resource that is attached to a + // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional optional ISCSIPersistentVolumeSource iscsi = 7; - // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // cinder represents a cinder volume attached and mounted on kubelets host machine. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional optional CephFSPersistentVolumeSource cephfs = 9; - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional optional FCVolumeSource fc = 10; - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running // +optional optional FlockerVolumeSource flocker = 11; - // FlexVolume represents a generic volume resource that is + // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional optional FlexPersistentVolumeSource flexVolume = 12; - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. 
// +optional optional AzureFilePersistentVolumeSource azureFile = 13; - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional optional QuobyteVolumeSource quobyte = 15; - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. // +optional optional AzureDiskVolumeSource azureDisk = 16; - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine // +optional optional PortworxVolumeSource portworxVolume = 18; - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional optional ScaleIOPersistentVolumeSource scaleIO = 19; - // Local represents directly-attached storage with node affinity + // local represents directly-attached storage with node affinity // +optional optional LocalVolumeSource local = 20; - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; - // CSI represents storage that is handled by an external CSI driver (Beta feature). + // csi represents storage that is handled by an external CSI driver (Beta feature). // +optional optional CSIPersistentVolumeSource csi = 22; } // PersistentVolumeSpec is the specification of a persistent volume. message PersistentVolumeSpec { - // A description of the persistent volume's resources and capacity. + // capacity is the description of the persistent volume's resources and capacity. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1; - // The actual volume backing the persistent volume. + // persistentVolumeSource is the actual volume backing the persistent volume. optional PersistentVolumeSource persistentVolumeSource = 2; - // AccessModes contains all ways the volume can be mounted. + // accessModes contains all ways the volume can be mounted. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes // +optional + // +listType=atomic repeated string accessModes = 3; - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound.
// claim.VolumeName is the authoritative bind between PV and PVC. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding // +optional + // +structType=granular optional ObjectReference claimRef = 4; - // What happens to a persistent volume when released from its claim. + // persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. // Valid options are Retain (default for manually created PersistentVolumes), Delete (default // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). // Recycle must be supported by the volume plugin underlying this PersistentVolume. @@ -2959,15 +3414,16 @@ message PersistentVolumeSpec { // +optional optional string persistentVolumeReclaimPolicy = 5; - // Name of StorageClass to which this persistent volume belongs. Empty value + // storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value // means that this volume does not belong to any StorageClass. // +optional optional string storageClassName = 6; - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will // simply fail if one is invalid. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional + // +listType=atomic repeated string mountOptions = 7; // volumeMode defines if a volume is intended to be used with a formatted filesystem @@ -2975,35 +3431,51 @@ message PersistentVolumeSpec { // +optional optional string volumeMode = 8; - // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // nodeAffinity defines constraints that limit what nodes this volume can be accessed from. // This field influences the scheduling of pods that use this volume. // +optional optional VolumeNodeAffinity nodeAffinity = 9; + + // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value + // is not allowed. When this field is not set, it indicates that this volume does not belong to any + // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver + // after a volume has been updated successfully to a new class. + // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound + // PersistentVolumeClaims during the binding process. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). + // +featureGate=VolumeAttributesClass + // +optional + optional string volumeAttributesClassName = 10; } // PersistentVolumeStatus is the current status of a persistent volume. message PersistentVolumeStatus { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // phase indicates if a volume is available, bound to a claim, or released by a claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase // +optional optional string phase = 1; - // A human-readable message indicating details about why the volume is in this state. + // message is a human-readable message indicating details about why the volume is in this state. // +optional optional string message = 2; - // Reason is a brief CamelCase string that describes any failure and is meant + // reason is a brief CamelCase string that describes any failure and is meant // for machine parsing and tidy display in the CLI. 
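The hunks above add spec.volumeAttributesClassName to PersistentVolumeSpec (and the earlier hunk adds currentVolumeAttributesClassName to PVC status). A minimal, illustrative Go sketch of how a caller might set the new field, assuming the vendored k8s.io/api v0.31 types; the class name "gold", the CSI driver name, and the volume handle are placeholders, not values from this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	pv := corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pv"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			// New field in this API level; only honored when the
			// VolumeAttributesClass feature gate is enabled.
			VolumeAttributesClassName: ptr("gold"),
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{Driver: "example.csi.driver", VolumeHandle: "vol-1"},
			},
		},
	}
	fmt.Println(*pv.Spec.VolumeAttributesClassName)
}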
// +optional optional string reason = 3; + + // lastPhaseTransitionTime is the time the phase transitioned from one to another + // and automatically resets to current time everytime a volume phase transitions. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; } // Represents a Photon Controller persistent disk resource. message PhotonPersistentDiskVolumeSource { - // ID that identifies Photon Controller persistent disk + // pdID is the ID that identifies Photon Controller persistent disk optional string pdID = 1; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. optional string fsType = 2; @@ -3015,7 +3487,7 @@ message Pod { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -3041,6 +3513,7 @@ message PodAffinity { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional + // +listType=atomic repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; // The scheduler will prefer to schedule pods to nodes that satisfy @@ -3053,6 +3526,7 @@ message PodAffinity { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; } @@ -3064,14 +3538,16 @@ message PodAffinity { // a pod of the set of pods is running message PodAffinityTerm { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; // namespaces specifies a static list of namespace names that the term applies to. // The term is applied to the union of the namespaces listed in this field // and the ones selected by namespaceSelector. - // null or empty namespaces list and null namespaceSelector means "this pod's namespace" + // null or empty namespaces list and null namespaceSelector means "this pod's namespace". // +optional + // +listType=atomic repeated string namespaces = 2; // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -3086,9 +3562,36 @@ message PodAffinityTerm { // and the ones listed in the namespaces field. // null selector and null or empty namespaces list means "this pod's namespace". // An empty selector ({}) matches all namespaces. - // This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; + + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both matchLabelKeys and labelSelector. + // Also, matchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // + // +listType=atomic + // +optional + repeated string matchLabelKeys = 5; + + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // + // +listType=atomic + // +optional + repeated string mismatchLabelKeys = 6; } // Pod anti affinity is a group of inter pod anti affinity scheduling rules. @@ -3101,6 +3604,7 @@ message PodAntiAffinity { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional + // +listType=atomic repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; // The scheduler will prefer to schedule pods to nodes that satisfy @@ -3113,6 +3617,7 @@ message PodAntiAffinity { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; } @@ -3162,11 +3667,11 @@ message PodCondition { // Last time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -3184,12 +3689,14 @@ message PodDNSConfig { // This will be appended to the base nameservers generated from DNSPolicy. // Duplicated nameservers will be removed. // +optional + // +listType=atomic repeated string nameservers = 1; // A list of DNS search domains for host-name lookup. 
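The new matchLabelKeys/mismatchLabelKeys fields on PodAffinityTerm above fold values from the incoming pod's own labels into the label selector. A hedged sketch of how that looks from Go with the vendored k8s.io/api v0.31 types; the label keys "app" and "pod-template-hash" are illustrative choices, not mandated by the API.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	term := corev1.PodAffinityTerm{
		TopologyKey:   "kubernetes.io/hostname",
		LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		// matchLabelKeys: merge the incoming pod's "app" value into the
		// selector as `app in (<value>)`.
		MatchLabelKeys: []string{"app"},
		// mismatchLabelKeys: require a different pod-template-hash, i.e.
		// `pod-template-hash notin (<value>)`.
		MismatchLabelKeys: []string{"pod-template-hash"},
	}
	affinity := corev1.Affinity{
		PodAffinity: &corev1.PodAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{term},
		},
	}
	fmt.Println(len(affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution))
}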
// This will be appended to the base search paths generated from DNSPolicy. // Duplicated search paths will be removed. // +optional + // +listType=atomic repeated string searches = 2; // A list of DNS resolver options. @@ -3197,6 +3704,7 @@ message PodDNSConfig { // Duplicated entries will be removed. Resolution options given in Options // will override those that appear in the base DNSPolicy. // +optional + // +listType=atomic repeated PodDNSConfigOption options = 3; } @@ -3238,15 +3746,14 @@ message PodExecOptions { optional string container = 5; // Command is the remote command to execute. argv array. Not executed within a shell. + // +listType=atomic repeated string command = 6; } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// PodIP represents a single IP address allocated to the pod. message PodIP { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod + // +required optional string ip = 1; } @@ -3255,7 +3762,7 @@ message PodList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md @@ -3288,7 +3795,7 @@ message PodLogOptions { // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. @@ -3335,6 +3842,7 @@ message PodPortForwardOptions { // List of ports to forward // Required when using WebSockets // +optional + // +listType=atomic repeated int32 ports = 1; } @@ -3351,6 +3859,67 @@ message PodReadinessGate { optional string conditionType = 1; } +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// +// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. +// Containers that need access to the ResourceClaim reference it with this name. +message PodResourceClaim { + // Name uniquely identifies this resource claim inside the pod. + // This must be a DNS_LABEL. + optional string name = 1; + + // ResourceClaimName is the name of a ResourceClaim object in the same + // namespace as this pod. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimName = 3; + + // ResourceClaimTemplateName is the name of a ResourceClaimTemplate + // object in the same namespace as this pod. + // + // The template will be used to create a new ResourceClaim, which will + // be bound to this pod. When this pod is deleted, the ResourceClaim + // will also be deleted. The pod name and resource name, along with a + // generated component, will be used to form a unique name for the + // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. 
+ // + // This field is immutable and no changes will be made to the + // corresponding ResourceClaim by the control plane after creating the + // ResourceClaim. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimTemplateName = 4; +} + +// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim +// which references a ResourceClaimTemplate. It stores the generated name for +// the corresponding ResourceClaim. +message PodResourceClaimStatus { + // Name uniquely identifies this resource claim inside the pod. + // This must match the name of an entry in pod.spec.resourceClaims, + // which implies that the string must be a DNS_LABEL. + optional string name = 1; + + // ResourceClaimName is the name of the ResourceClaim that was + // generated for the Pod in the namespace of the Pod. If this is + // unset, then generating a ResourceClaim was not necessary. The + // pod.spec.resourceClaims entry can be ignored in this case. + // + // +optional + optional string resourceClaimName = 2; +} + +// PodSchedulingGate is associated to a Pod to guard its scheduling. +message PodSchedulingGate { + // Name of the scheduling gate. + // Each scheduling gate must have a unique name field. + optional string name = 1; +} + // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -3398,13 +3967,29 @@ message PodSecurityContext { // +optional optional bool runAsNonRoot = 3; - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. + // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic repeated int64 supplementalGroups = 4; + // Defines how supplemental groups of the first container processes are calculated. + // Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + // and the container runtime must implement support for this feature. + // Note that this field cannot be set when spec.os.name is windows. + // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34 + // +featureGate=SupplementalGroupsPolicy + // +optional + optional string supplementalGroupsPolicy = 12; + // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -3422,6 +4007,7 @@ message PodSecurityContext { // sysctls (by the container runtime) might fail to launch. // Note that this field cannot be set when spec.os.name is windows. 
// +optional + // +listType=atomic repeated Sysctl sysctls = 7; // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume @@ -3438,6 +4024,11 @@ message PodSecurityContext { // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 10; + + // appArmorProfile is the AppArmor options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + optional AppArmorProfile appArmorProfile = 11; } // Describes the class of pods that should avoid this node. @@ -3445,7 +4036,7 @@ message PodSecurityContext { message PodSignature { // Reference to controller whose pods should avoid this node. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; } // PodSpec is a description of a pod. @@ -3455,6 +4046,8 @@ message PodSpec { // +optional // +patchMergeKey=name // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name repeated Volume volumes = 1; // List of initialization containers belonging to the pod. @@ -3472,6 +4065,8 @@ message PodSpec { // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated Container initContainers = 20; // List of containers belonging to the pod. @@ -3480,20 +4075,23 @@ message PodSpec { // Cannot be updated. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated Container containers = 2; // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated EphemeralContainer ephemeralContainers = 34; // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3537,7 +4135,7 @@ message PodSpec { // +optional optional string serviceAccountName = 8; - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. // Deprecated: Use serviceAccountName instead. // +k8s:conversion-gen=false // +optional @@ -3547,9 +4145,11 @@ message PodSpec { // +optional optional bool automountServiceAccountToken = 21; - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. 
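The PodSecurityContext hunks above add supplementalGroupsPolicy and a pod-level appArmorProfile. A small, non-authoritative Go sketch against the vendored k8s.io/api v0.31 types; the group ID 2000 is a placeholder, and supplementalGroupsPolicy is only honored when the SupplementalGroupsPolicy feature gate is on.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	sc := corev1.PodSecurityContext{
		RunAsNonRoot:       ptr(true),
		SupplementalGroups: []int64{2000},
		// "Strict" ignores group memberships baked into the container image;
		// "Merge" (the default) keeps them.
		SupplementalGroupsPolicy: ptr(corev1.SupplementalGroupsPolicyStrict),
		// Pod-level AppArmor profile, replacing the old beta annotations.
		AppArmorProfile: &corev1.AppArmorProfile{Type: corev1.AppArmorProfileTypeRuntimeDefault},
	}
	fmt.Println(*sc.SupplementalGroupsPolicy)
}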
+ // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename // +optional optional string nodeName = 10; @@ -3587,12 +4187,13 @@ message PodSpec { optional PodSecurityContext securityContext = 14; // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. + // If specified, these secrets will be passed to individual puller implementations for them to use. // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated LocalObjectReference imagePullSecrets = 15; // Specifies the hostname of the Pod @@ -3616,13 +4217,16 @@ message PodSpec { // If specified, the pod's tolerations. // +optional + // +listType=atomic repeated Toleration tolerations = 22; // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. + // file if specified. // +optional // +patchMergeKey=ip // +patchStrategy=merge + // +listType=map + // +listMapKey=ip repeated HostAlias hostAliases = 23; // If specified, indicates the pod's priority. "system-node-critical" and @@ -3653,6 +4257,7 @@ message PodSpec { // all conditions specified in the readiness gates have status equal to "True" // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates // +optional + // +listType=atomic repeated PodReadinessGate readinessGates = 28; // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used @@ -3660,7 +4265,6 @@ message PodSpec { // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - // This is a beta feature as of Kubernetes v1.14. // +optional optional string runtimeClassName = 29; @@ -3673,7 +4277,6 @@ message PodSpec { // PreemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional optional string preemptionPolicy = 31; @@ -3684,9 +4287,8 @@ message PodSpec { // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional - map overhead = 32; + map overhead = 32; // TopologySpreadConstraints describes how a group of pods ought to spread across topology // domains. Scheduler will schedule pods in a way which abides by the constraints. 
@@ -3716,6 +4318,8 @@ message PodSpec { // If the OS field is set to windows, following fields must be unset: // - spec.hostPID // - spec.hostIPC + // - spec.hostUsers + // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile // - spec.securityContext.fsGroup @@ -3725,6 +4329,8 @@ message PodSpec { // - spec.securityContext.runAsUser // - spec.securityContext.runAsGroup // - spec.securityContext.supplementalGroups + // - spec.securityContext.supplementalGroupsPolicy + // - spec.containers[*].securityContext.appArmorProfile // - spec.containers[*].securityContext.seLinuxOptions // - spec.containers[*].securityContext.seccompProfile // - spec.containers[*].securityContext.capabilities @@ -3735,8 +4341,51 @@ message PodSpec { // - spec.containers[*].securityContext.runAsUser // - spec.containers[*].securityContext.runAsGroup // +optional - // This is an alpha field and requires the IdentifyPodOS feature optional PodOS os = 36; + + // Use the host's user namespace. + // Optional: Default to true. + // If set to true or not present, the pod will be run in the host user namespace, useful + // for when the pod needs a feature only available to the host user namespace, such as + // loading a kernel module with CAP_SYS_MODULE. + // When set to false, a new userns is created for the pod. Setting false is useful for + // mitigating container breakout vulnerabilities even allowing users to run their + // containers as root without actually having root privileges on the host. + // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. + // +k8s:conversion-gen=false + // +optional + optional bool hostUsers = 37; + + // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated PodSchedulingGate schedulingGates = 38; + + // ResourceClaims defines which ResourceClaims must be allocated + // and reserved before the Pod is allowed to start. The resources + // will be made available to those containers which consume them + // by name. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. + // + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + repeated PodResourceClaim resourceClaims = 39; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3769,6 +4418,8 @@ message PodStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated PodCondition conditions = 2; // A human readable message indicating details about why the pod is in this condition. @@ -3790,11 +4441,23 @@ message PodStatus { // +optional optional string nominatedNodeName = 11; - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. 
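The PodSpec additions in the hunk above (hostUsers = 37, schedulingGates = 38, resourceClaims = 39) can be exercised from Go as follows; a minimal sketch assuming the vendored k8s.io/api v0.31 types, with the image reference, gate name, and claim-template name invented for illustration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	spec := corev1.PodSpec{
		Containers: []corev1.Container{{Name: "app", Image: "registry.example/app:1.0"}},
		// hostUsers=false asks for a per-pod user namespace
		// (UserNamespacesSupport feature gate).
		HostUsers: ptr(false),
		// The pod stays in SchedulingGated until this gate is removed.
		SchedulingGates: []corev1.PodSchedulingGate{{Name: "example.com/provisioning"}},
		// Dynamic resource allocation: exactly one of ResourceClaimName or
		// ResourceClaimTemplateName must be set per entry.
		ResourceClaims: []corev1.PodResourceClaim{{
			Name:                      "gpu",
			ResourceClaimTemplateName: ptr("gpu-claim-template"),
		}},
	}
	fmt.Println(len(spec.SchedulingGates), len(spec.ResourceClaims))
}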
+ // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will + // not be updated even if there is a node is assigned to pod // +optional optional string hostIP = 5; - // IP address allocated to the pod. Routable at least within the cluster. + // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must + // match the hostIP field. This list is empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will + // not be updated even if there is a node is assigned to this pod. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + // +listType=atomic + repeated HostIP hostIPs = 16; + + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. // +optional optional string podIP = 6; @@ -3805,35 +4468,54 @@ message PodStatus { // +optional // +patchStrategy=merge // +patchMergeKey=ip + // +listType=map + // +listMapKey=ip repeated PodIP podIPs = 12; // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; // The list has one entry per init container in the manifest. The most recent successful // init container will have ready = true, the most recently started container will have // startTime set. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +listType=atomic repeated ContainerStatus initContainerStatuses = 10; - // The list has one entry per container in the manifest. Each entry is currently the output - // of `docker inspect`. + // The list has one entry per container in the manifest. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional + // +listType=atomic repeated ContainerStatus containerStatuses = 8; // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional optional string qosClass = 9; // Status for any ephemeral containers that have run in this pod. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional + // +listType=atomic repeated ContainerStatus ephemeralContainerStatuses = 13; + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + optional string resize = 14; + + // Status of resource claims. + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + repeated PodResourceClaimStatus resourceClaimStatuses = 15; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded @@ -3841,7 +4523,7 @@ message PodStatusResult { // Standard object's metadata. 
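The new plural status.hostIPs field above coexists with the legacy singular hostIP, and when both are set the first hostIPs entry must match hostIP. A short Go sketch of a consumer that prefers the new field, assuming the vendored k8s.io/api v0.31 types; the sample addresses are made up.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// hostAddrs returns all host addresses from status.hostIPs, falling back to
// the legacy status.hostIP when the plural field is empty.
func hostAddrs(pod *corev1.Pod) []string {
	addrs := make([]string, 0, len(pod.Status.HostIPs))
	for _, h := range pod.Status.HostIPs {
		addrs = append(addrs, h.IP)
	}
	if len(addrs) == 0 && pod.Status.HostIP != "" {
		addrs = append(addrs, pod.Status.HostIP)
	}
	return addrs
}

func main() {
	pod := &corev1.Pod{}
	pod.Status.HostIP = "10.0.0.7"
	pod.Status.HostIPs = []corev1.HostIP{{IP: "10.0.0.7"}, {IP: "fd00::7"}}
	fmt.Println(hostAddrs(pod))
}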
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Most recently observed status of the pod. // This data may not be up to date. @@ -3857,7 +4539,7 @@ message PodTemplate { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -3870,7 +4552,7 @@ message PodTemplateList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pod templates repeated PodTemplate items = 2; @@ -3881,7 +4563,7 @@ message PodTemplateSpec { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -3914,15 +4596,15 @@ message PortStatus { // PortworxVolumeSource represents a Portworx volume resource. message PortworxVolumeSource { - // VolumeID uniquely identifies a Portworx volume + // volumeID uniquely identifies a Portworx volume optional string volumeID = 1; - // FSType represents the filesystem type to mount + // fSType represents the filesystem type to mount // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. optional string fsType = 2; - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 3; @@ -3943,7 +4625,7 @@ message PreferAvoidPodsEntry { // Time at which this entry was added to the list. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; // (brief) reason why this entry was added to the list. // +optional @@ -4026,19 +4708,19 @@ message ProbeHandler { optional TCPSocketAction tcpSocket = 3; // GRPC specifies an action involving a GRPC port. - // This is an alpha field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional optional GRPCAction grpc = 4; } // Represents a projected volume source message ProjectedVolumeSource { - // list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. // +optional + // +listType=atomic repeated VolumeProjection sources = 1; - // Mode bits used to set permissions on created files by default. 
+ // defaultMode are the mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. // Directories within the path are not affected by this setting. @@ -4051,30 +4733,30 @@ message ProjectedVolumeSource { // Represents a Quobyte mount that lasts the lifetime of a pod. // Quobyte volumes do not support ownership management or SELinux relabeling. message QuobyteVolumeSource { - // Registry represents a single or multiple Quobyte Registry services + // registry represents a single or multiple Quobyte Registry services // specified as a string as host:port pair (multiple entries are separated with commas) // which acts as the central registry for volumes optional string registry = 1; - // Volume is a string that references an already created Quobyte volume by name. + // volume is a string that references an already created Quobyte volume by name. optional string volume = 2; - // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // readOnly here will force the Quobyte volume to be mounted with read-only permissions. // Defaults to false. // +optional optional bool readOnly = 3; - // User to map volume access to + // user to map volume access to // Defaults to serivceaccount user // +optional optional string user = 4; - // Group to map volume access to + // group to map volume access to // Default is no group // +optional optional string group = 5; - // Tenant owning the given Quobyte volume in the Backend + // tenant owning the given Quobyte volume in the Backend // Used with dynamically provisioned Quobyte volumes, value is set by the plugin // +optional optional string tenant = 6; @@ -4083,15 +4765,16 @@ message QuobyteVolumeSource { // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. message RBDPersistentVolumeSource { - // A collection of Ceph monitors. + // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; - // The rados image name. + // image is the rados image name. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd @@ -4099,32 +4782,35 @@ message RBDPersistentVolumeSource { // +optional optional string fsType = 3; - // The rados pool name. + // pool is the rados pool name. // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; - // The rados user name. + // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; - // Keyring is the path to key ring for RBDUser. + // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. 
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; - // SecretRef is name of the authentication secret for RBDUser. If provided + // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional SecretReference secretRef = 7; - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional @@ -4134,15 +4820,16 @@ message RBDPersistentVolumeSource { // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { - // A collection of Ceph monitors. + // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic repeated string monitors = 1; - // The rados image name. + // image is the rados image name. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it optional string image = 2; - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd @@ -4150,32 +4837,35 @@ message RBDVolumeSource { // +optional optional string fsType = 3; - // The rados pool name. + // pool is the rados pool name. // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; - // The rados user name. + // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; - // Keyring is the path to key ring for RBDUser. + // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; - // SecretRef is name of the authentication secret for RBDUser. If provided + // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 7; - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional @@ -4187,7 +4877,7 @@ message RangeAllocation { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Range is string that identifies the range represented by 'data'. 
optional string range = 2; @@ -4202,7 +4892,7 @@ message ReplicationController { // be the same as the Pod(s) that the replication controller manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4228,7 +4918,7 @@ message ReplicationControllerCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -4244,7 +4934,7 @@ message ReplicationControllerList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of replication controllers. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -4277,6 +4967,7 @@ message ReplicationControllerSpec { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional optional PodTemplateSpec template = 3; @@ -4285,7 +4976,7 @@ message ReplicationControllerSpec { // ReplicationControllerStatus represents the current status of a replication // controller. message ReplicationControllerStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -4309,9 +5000,26 @@ message ReplicationControllerStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicationControllerCondition conditions = 6; } +// ResourceClaim references one entry in PodSpec.ResourceClaims. +message ResourceClaim { + // Name must match the name of one entry in pod.spec.resourceClaims of + // the Pod where this field is used. It makes that resource available + // inside a container. + optional string name = 1; + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. 
+ // + // +optional + optional string request = 2; +} + // ResourceFieldSelector represents container resources (cpu, memory) and their output format // +structType=atomic message ResourceFieldSelector { @@ -4324,7 +5032,26 @@ message ResourceFieldSelector { // Specifies the output format of the exposed resources, defaults to "1" // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; +} + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +message ResourceHealth { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + optional string resourceID = 1; + + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. + optional string health = 2; } // ResourceQuota sets aggregate quota restrictions enforced per namespace @@ -4332,7 +5059,7 @@ message ResourceQuota { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4350,7 +5077,7 @@ message ResourceQuotaList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ @@ -4362,11 +5089,12 @@ message ResourceQuotaSpec { // hard is the set of desired hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map hard = 1; + map hard = 1; // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. // +optional + // +listType=atomic repeated string scopes = 2; // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota @@ -4381,11 +5109,11 @@ message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map hard = 1; + map hard = 1; // Used is the current observed total usage of the resource in the namespace. // +optional - map used = 2; + map used = 2; } // ResourceRequirements describes the compute resource requirements. @@ -4393,14 +5121,42 @@ message ResourceRequirements { // Limits describes the maximum amount of compute resources allowed. 
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map limits = 1; + map limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map requests = 2; + map requests = 2; + + // Claims lists the names of resources, defined in spec.resourceClaims, + // that are used by this container. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. It can only be set for containers. + // + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + repeated ResourceClaim claims = 3; +} + +message ResourceStatus { + // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. + // +required + optional string name = 1; + + // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. + // At a minimum, ResourceID must uniquely identify the Resource + // allocated to the Pod on the Node for the lifetime of a Pod. + // See ResourceID type for it's definition. + // +listType=map + // +listMapKey=resourceID + repeated ResourceHealth resources = 2; } // SELinuxOptions are the labels to be applied to the container @@ -4424,45 +5180,47 @@ message SELinuxOptions { // ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume message ScaleIOPersistentVolumeSource { - // The host address of the ScaleIO API Gateway. + // gateway is the host address of the ScaleIO API Gateway. optional string gateway = 1; - // The name of the storage system as configured in ScaleIO. + // system is the name of the storage system as configured in ScaleIO. optional string system = 2; - // SecretRef references to the secret for ScaleIO user and other + // secretRef references to the secret for ScaleIO user and other // sensitive information. If this is not provided, Login operation will fail. optional SecretReference secretRef = 3; - // Flag to enable/disable SSL communication with Gateway, default false + // sslEnabled is the flag to enable/disable SSL communication with Gateway, default false // +optional optional bool sslEnabled = 4; - // The name of the ScaleIO Protection Domain for the configured storage. + // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. // +optional optional string protectionDomain = 5; - // The ScaleIO Storage Pool associated with the protection domain. + // storagePool is the ScaleIO Storage Pool associated with the protection domain. // +optional optional string storagePool = 6; - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; - // The name of a volume already created in the ScaleIO system + // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. 
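The ResourceRequirements.claims field added above lets a container reference an entry declared in pod.spec.resourceClaims by name. A hedged Go sketch using the vendored k8s.io/api v0.31 types; the quantities and the claim name "gpu" are placeholders and must correspond to a claim actually declared on the pod.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	res := corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("500m"),
			corev1.ResourceMemory: resource.MustParse("256Mi"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("256Mi"),
		},
		// Each claims entry names one of pod.spec.resourceClaims; requests
		// cannot exceed limits for the conventional resources above.
		Claims: []corev1.ResourceClaim{{Name: "gpu"}},
	}
	fmt.Println(len(res.Claims))
}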
optional string volumeName = 8; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". // Default is "xfs" // +optional + // +default="xfs" optional string fsType = 9; - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 10; @@ -4470,45 +5228,47 @@ message ScaleIOPersistentVolumeSource { // ScaleIOVolumeSource represents a persistent ScaleIO volume message ScaleIOVolumeSource { - // The host address of the ScaleIO API Gateway. + // gateway is the host address of the ScaleIO API Gateway. optional string gateway = 1; - // The name of the storage system as configured in ScaleIO. + // system is the name of the storage system as configured in ScaleIO. optional string system = 2; - // SecretRef references to the secret for ScaleIO user and other + // secretRef references to the secret for ScaleIO user and other // sensitive information. If this is not provided, Login operation will fail. optional LocalObjectReference secretRef = 3; - // Flag to enable/disable SSL communication with Gateway, default false + // sslEnabled Flag enable/disable SSL communication with Gateway, default false // +optional optional bool sslEnabled = 4; - // The name of the ScaleIO Protection Domain for the configured storage. + // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. // +optional optional string protectionDomain = 5; - // The ScaleIO Storage Pool associated with the protection domain. + // storagePool is the ScaleIO Storage Pool associated with the protection domain. // +optional optional string storagePool = 6; - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; - // The name of a volume already created in the ScaleIO system + // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. optional string volumeName = 8; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". // +optional + // +default="xfs" optional string fsType = 9; - // Defaults to false (read/write). ReadOnly here will force + // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 10; @@ -4520,6 +5280,7 @@ message ScaleIOVolumeSource { message ScopeSelector { // A list of scope selector requirements by scope of the resources. // +optional + // +listType=atomic repeated ScopedResourceSelectorRequirement matchExpressions = 1; } @@ -4538,6 +5299,7 @@ message ScopedResourceSelectorRequirement { // the values array must be empty. // This array is replaced during a strategic merge patch. // +optional + // +listType=atomic repeated string values = 3; } @@ -4557,7 +5319,7 @@ message SeccompProfile { // localhostProfile indicates a profile defined in a file on the node should be used. // The profile must be preconfigured on the node to work. 
// Must be a descending path, relative to the kubelet's configured seccomp profile location. - // Must only be set if type is "Localhost". + // Must be set if type is "Localhost". Must NOT be set for any other type. // +optional optional string localhostProfile = 2; } @@ -4568,7 +5330,7 @@ message Secret { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the Secret cannot // be updated (only object metadata can be modified). @@ -4631,7 +5393,7 @@ message SecretList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -4647,7 +5409,7 @@ message SecretList { message SecretProjection { optional LocalObjectReference localObjectReference = 1; - // If unspecified, each key-value pair in the Data field of the referenced + // items if unspecified, each key-value pair in the Data field of the referenced // Secret will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -4655,9 +5417,10 @@ message SecretProjection { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; - // Specify whether the Secret or its key must be defined + // optional field specify whether the Secret or its key must be defined // +optional optional bool optional = 4; } @@ -4666,11 +5429,11 @@ message SecretProjection { // in any namespace // +structType=atomic message SecretReference { - // Name is unique within a namespace to reference a secret resource. + // name is unique within a namespace to reference a secret resource. // +optional optional string name = 1; - // Namespace defines the space within which the secret name must be unique. + // namespace defines the space within which the secret name must be unique. // +optional optional string namespace = 2; } @@ -4681,12 +5444,12 @@ message SecretReference { // as files using the keys in the Data field as the file names. // Secret volumes support ownership management and SELinux relabeling. message SecretVolumeSource { - // Name of the secret in the pod's namespace to use. + // secretName is the name of the secret in the pod's namespace to use. // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional optional string secretName = 1; - // If unspecified, each key-value pair in the Data field of the referenced + // items If unspecified, each key-value pair in the Data field of the referenced // Secret will be projected into the volume as a file whose name is the // key and content is the value. 
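// The localhostProfile rule above maps directly onto the vendored Go types: a sketch
// of a container SecurityContext that opts into a node-local seccomp profile (the
// profile path is hypothetical and must already exist under the kubelet's configured
// seccomp directory).

package examples

import corev1 "k8s.io/api/core/v1"

func localhostSeccomp() *corev1.SecurityContext {
	profile := "profiles/audit.json" // relative to the kubelet's seccomp root
	return &corev1.SecurityContext{
		SeccompProfile: &corev1.SeccompProfile{
			Type:             corev1.SeccompProfileTypeLocalhost,
			LocalhostProfile: &profile, // only valid because Type is Localhost
		},
	}
}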
If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -4694,9 +5457,10 @@ message SecretVolumeSource { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic repeated KeyToPath items = 2; - // Optional: mode bits used to set permissions on created files by default. + // defaultMode is Optional: mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values // for mode bits. Defaults to 0644. @@ -4706,7 +5470,7 @@ message SecretVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the Secret or its keys must be defined + // optional field specify whether the Secret or its keys must be defined // +optional optional bool optional = 4; } @@ -4785,7 +5549,7 @@ message SecurityContext { optional bool allowPrivilegeEscalation = 7; // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. @@ -4798,6 +5562,12 @@ message SecurityContext { // Note that this field cannot be set when spec.os.name is windows. // +optional optional SeccompProfile seccompProfile = 11; + + // appArmorProfile is the AppArmor options to use by this container. If set, this profile + // overrides the pod's appArmorProfile. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + optional AppArmorProfile appArmorProfile = 12; } // SerializedReference is a reference to serialized object. @@ -4814,7 +5584,7 @@ message Service { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4837,13 +5607,18 @@ message ServiceAccount { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. + // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". + // This field should not be used to find auto-generated service account token secrets for use outside of pods. + // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. 
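// The items/defaultMode/optional fields of SecretVolumeSource described above combine
// like this; a sketch using the vendored types, with hypothetical secret, key, and
// path names.

package examples

import corev1 "k8s.io/api/core/v1"

func secretVolume() corev1.Volume {
	mode := int32(0o400) // owner read-only
	return corev1.Volume{
		Name: "tls-material",
		VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{
				SecretName: "web-tls", // secret in the pod's namespace
				Items: []corev1.KeyToPath{ // only these keys are projected
					{Key: "tls.crt", Path: "certs/tls.crt"},
					{Key: "tls.key", Path: "certs/tls.key"},
				},
				DefaultMode: &mode,
			},
		},
	}
}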
// More info: https://kubernetes.io/docs/concepts/configuration/secret // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated ObjectReference secrets = 2; // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images @@ -4851,6 +5626,7 @@ message ServiceAccount { // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod // +optional + // +listType=atomic repeated LocalObjectReference imagePullSecrets = 3; // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. @@ -4864,7 +5640,7 @@ message ServiceAccountList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ServiceAccounts. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ @@ -4876,14 +5652,14 @@ message ServiceAccountList { // the pods runtime filesystem for use against APIs (Kubernetes API Server or // otherwise). message ServiceAccountTokenProjection { - // Audience is the intended audience of the token. A recipient of a token + // audience is the intended audience of the token. A recipient of a token // must identify itself with an identifier specified in the audience of the // token, and otherwise should reject the token. The audience defaults to the // identifier of the apiserver. // +optional optional string audience = 1; - // ExpirationSeconds is the requested duration of validity of the service + // expirationSeconds is the requested duration of validity of the service // account token. As the token approaches expiration, the kubelet volume // plugin will proactively rotate the service account token. The kubelet will // start trying to rotate the token if the token is older than 80 percent of @@ -4892,7 +5668,7 @@ message ServiceAccountTokenProjection { // +optional optional int64 expirationSeconds = 2; - // Path is the path relative to the mount point of the file to project the + // path is the path relative to the mount point of the file to project the // token into. optional string path = 3; } @@ -4902,7 +5678,7 @@ message ServiceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of services repeated Service items = 2; @@ -4925,10 +5701,19 @@ message ServicePort { optional string protocol = 2; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). 
- // Non-standard protocols should use prefixed names such as + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 6; @@ -4945,7 +5730,7 @@ message ServicePort { // omitted or set equal to the 'port' field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; // The port on each node on which this service is exposed when type is // NodePort or LoadBalancer. Usually assigned by the system. If a value is @@ -5062,6 +5847,7 @@ message ServiceSpec { // at a node with this IP. A common example is external load-balancers // that are not part of the Kubernetes system. // +optional + // +listType=atomic repeated string externalIPs = 5; // Supports "ClientIP" and "None". Used to maintain session affinity. @@ -5072,11 +5858,13 @@ message ServiceSpec { // +optional optional string sessionAffinity = 7; - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. + // Only applies to Service Type: LoadBalancer. // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. + // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional optional string loadBalancerIP = 8; @@ -5085,6 +5873,7 @@ message ServiceSpec { // cloud-provider does not support the feature." // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional + // +listType=atomic repeated string loadBalancerSourceRanges = 9; // externalName is the external reference that discovery mechanisms will @@ -5094,12 +5883,19 @@ message ServiceSpec { // +optional optional string externalName = 10; - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. + // externalTrafficPolicy describes how nodes distribute service traffic they + // receive on one of the Service's "externally-facing" addresses (NodePorts, + // ExternalIPs, and LoadBalancer IPs). 
If set to "Local", the proxy will configure + // the service in a way that assumes that external load balancers will take care + // of balancing the service traffic between nodes, and so each node will deliver + // traffic only to the node-local endpoints of the service, without masquerading + // the client source IP. (Traffic mistakenly sent to a node with no endpoints will + // be dropped.) The default value, "Cluster", uses the standard behavior of + // routing to all endpoints evenly (possibly modified by topology and other + // features). Note that traffic sent to an External IP or LoadBalancer IP from + // within the cluster will always get "Cluster" semantics, but clients sending to + // a NodePort from within the cluster may need to take traffic policy into account + // when picking a node. // +optional optional string externalTrafficPolicy = 11; @@ -5112,6 +5908,7 @@ message ServiceSpec { // service or not. If this field is specified when creating a Service // which does not need it, creation will fail. This field will be wiped // when updating a Service to no longer need it (e.g. changing type). + // This field cannot be updated once set. // +optional optional int32 healthCheckNodePort = 12; @@ -5168,8 +5965,6 @@ message ServiceSpec { // value), those requests will be respected, regardless of this field. // This field may only be set for services with type LoadBalancer and will // be cleared if the type is changed to any other type. - // This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature. - // +featureGate=ServiceLBNodePortControl // +optional optional bool allocateLoadBalancerNodePorts = 20; @@ -5183,19 +5978,28 @@ message ServiceSpec { // implementation (e.g. cloud providers) should ignore Services that set this field. // This field can only be set when creating or updating a Service to type 'LoadBalancer'. // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - // +featureGate=LoadBalancerClass // +optional optional string loadBalancerClass = 21; - // InternalTrafficPolicy specifies if the cluster internal traffic - // should be routed to all endpoints or node-local endpoints only. - // "Cluster" routes internal traffic to a Service to all endpoints. - // "Local" routes traffic to node-local endpoints only, traffic is - // dropped if no node-local endpoints are ready. - // The default value is "Cluster". - // +featureGate=ServiceInternalTrafficPolicy + // InternalTrafficPolicy describes how nodes distribute service traffic they + // receive on the ClusterIP. If set to "Local", the proxy will assume that pods + // only want to talk to endpoints of the service on the same node as the pod, + // dropping the traffic if there are no local endpoints. The default value, + // "Cluster", uses the standard behavior of routing to all endpoints evenly + // (possibly modified by topology and other features). // +optional optional string internalTrafficPolicy = 22; + + // TrafficDistribution offers a way to express preferences for how traffic is + // distributed to Service endpoints. Implementations can use this field as a + // hint, but are not required to guarantee strict adherence. If the field is + // not set, the implementation will apply its default routing strategy. If set + // to "PreferClose", implementations should prioritize endpoints that are + // topologically close (e.g., same zone). 
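// Taken together, externalTrafficPolicy, trafficDistribution, and the appProtocol
// hint from the hunks above are set on an ordinary ServiceSpec; a sketch with
// hypothetical selector and port names (TrafficDistribution needs the alpha
// ServiceTrafficDistribution gate noted just below).

package examples

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func externalWebService() corev1.ServiceSpec {
	h2c := "kubernetes.io/h2c"   // Kubernetes-defined prefixed protocol name
	preferClose := "PreferClose" // corev1.ServiceTrafficDistributionPreferClose
	return corev1.ServiceSpec{
		Type:     corev1.ServiceTypeLoadBalancer,
		Selector: map[string]string{"app": "web"},
		Ports: []corev1.ServicePort{{
			Name:        "h2c",
			Protocol:    corev1.ProtocolTCP,
			Port:        80,
			TargetPort:  intstr.FromString("http"),
			AppProtocol: &h2c,
		}},
		// "Local" keeps the client source IP by delivering only to node-local endpoints.
		ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyLocal,
		TrafficDistribution:   &preferClose,
	}
}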
+ // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // +featureGate=ServiceTrafficDistribution + // +optional + optional string trafficDistribution = 23; } // ServiceStatus represents the current status of a service. @@ -5211,7 +6015,7 @@ message ServiceStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. @@ -5221,13 +6025,19 @@ message SessionAffinityConfig { optional ClientIPConfig clientIP = 1; } +// SleepAction describes a "sleep" action. +message SleepAction { + // Seconds is the number of seconds to sleep. + optional int64 seconds = 1; +} + // Represents a StorageOS persistent volume resource. message StorageOSPersistentVolumeSource { - // VolumeName is the human-readable name of the StorageOS volume. Volume + // volumeName is the human-readable name of the StorageOS volume. Volume // names are only unique within a namespace. optional string volumeName = 1; - // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // volumeNamespace specifies the scope of the volume within StorageOS. If no // namespace is specified then the Pod's namespace will be used. This allows the // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. // Set VolumeName to any name to override the default behaviour. @@ -5236,18 +6046,18 @@ message StorageOSPersistentVolumeSource { // +optional optional string volumeNamespace = 2; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional optional string fsType = 3; - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 4; - // SecretRef specifies the secret to use for obtaining the StorageOS API + // secretRef specifies the secret to use for obtaining the StorageOS API // credentials. If not specified, default values will be attempted. // +optional optional ObjectReference secretRef = 5; @@ -5255,11 +6065,11 @@ message StorageOSPersistentVolumeSource { // Represents a StorageOS persistent volume resource. message StorageOSVolumeSource { - // VolumeName is the human-readable name of the StorageOS volume. Volume + // volumeName is the human-readable name of the StorageOS volume. Volume // names are only unique within a namespace. optional string volumeName = 1; - // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // volumeNamespace specifies the scope of the volume within StorageOS. If no // namespace is specified then the Pod's namespace will be used. This allows the // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. // Set VolumeName to any name to override the default behaviour. @@ -5268,18 +6078,18 @@ message StorageOSVolumeSource { // +optional optional string volumeNamespace = 2; - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
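// The SleepAction message above is the payload for the "sleep" lifecycle handler; a
// sketch of a preStop hook that simply waits before shutdown (assumes the
// PodLifecycleSleepAction feature is available in the target cluster).

package examples

import corev1 "k8s.io/api/core/v1"

func sleepBeforeShutdown() *corev1.Lifecycle {
	return &corev1.Lifecycle{
		PreStop: &corev1.LifecycleHandler{
			Sleep: &corev1.SleepAction{Seconds: 10}, // give endpoints time to drain
		},
	}
}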
// +optional optional string fsType = 3; - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional optional bool readOnly = 4; - // SecretRef specifies the secret to use for obtaining the StorageOS API + // secretRef specifies the secret to use for obtaining the StorageOS API // credentials. If not specified, default values will be attempted. // +optional optional LocalObjectReference secretRef = 5; @@ -5299,7 +6109,7 @@ message TCPSocketAction { // Number or name of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; // Optional: Host name to connect to, defaults to the pod IP. // +optional @@ -5324,7 +6134,7 @@ message Taint { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; } // The pod this Toleration is attached to tolerates any taint that matches @@ -5368,6 +6178,7 @@ message TopologySelectorLabelRequirement { // An array of string values. One value must match the label to be selected. // Each entry in Values is ORed. + // +listType=atomic repeated string values = 2; } @@ -5380,6 +6191,7 @@ message TopologySelectorLabelRequirement { message TopologySelectorTerm { // A list of topology selector requirements by labels. // +optional + // +listType=atomic repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; } @@ -5388,15 +6200,18 @@ message TopologySpreadConstraint { // MaxSkew describes the degree to which pods may be unevenly distributed. // When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference // between the number of matching pods in the target topology and the global minimum. + // The global minimum is the minimum number of matching pods in an eligible domain + // or zero if the number of eligible domains is less than MinDomains. // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 1/1/0: + // labelSelector spread as 2/2/1: + // In this case, the global minimum is 1. // +-------+-------+-------+ // | zone1 | zone2 | zone3 | // +-------+-------+-------+ - // | P | P | | + // | P P | P P | P | // +-------+-------+-------+ - // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; - // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + // scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) // violate MaxSkew(1). // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. // When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence @@ -5408,6 +6223,11 @@ message TopologySpreadConstraint { // and identical values are considered to be in the same topology. // We consider each as a "bucket", and try to put balanced number // of pods into each bucket. + // We define a domain as a particular instance of a topology. + // Also, we define an eligible domain as a domain whose nodes meet the requirements of + // nodeAffinityPolicy and nodeTaintsPolicy. + // e.g. 
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. // It's a required field. optional string topologyKey = 2; @@ -5438,7 +6258,67 @@ message TopologySpreadConstraint { // Pods that match this label selector are counted to determine the number of pods // in their corresponding topology domain. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; + + // MinDomains indicates a minimum number of eligible domains. + // When the number of eligible domains with matching topology keys is less than minDomains, + // Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + // And when the number of eligible domains with matching topology keys equals or greater than minDomains, + // this value has no effect on scheduling. + // As a result, when the number of eligible domains is less than minDomains, + // scheduler won't schedule more than maxSkew Pods to those domains. + // If value is nil, the constraint behaves as if MinDomains is equal to 1. + // Valid values are integers greater than 0. + // When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + // + // For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + // labelSelector spread as 2/2/2: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P | P P | P P | + // +-------+-------+-------+ + // The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + // In this situation, new pod with the same labelSelector cannot be scheduled, + // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + // it will violate MaxSkew. + // +optional + optional int32 minDomains = 5; + + // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + // when calculating pod topology spread skew. Options are: + // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + // + // If this value is nil, the behavior is equivalent to the Honor policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + // +optional + optional string nodeAffinityPolicy = 6; + + // NodeTaintsPolicy indicates how we will treat node taints when calculating + // pod topology spread skew. Options are: + // - Honor: nodes without taints, along with tainted nodes for which the incoming pod + // has a toleration, are included. + // - Ignore: node taints are ignored. All nodes are included. + // + // If this value is nil, the behavior is equivalent to the Ignore policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + // +optional + optional string nodeTaintsPolicy = 7; + + // MatchLabelKeys is a set of pod label keys to select the pods over which + // spreading will be calculated. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are ANDed with labelSelector + // to select the group of existing pods over which spreading will be calculated + // for the incoming pod. 
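// The maxSkew / minDomains arithmetic spelled out above is easier to see on a
// concrete constraint; a sketch using the vendored types with a hypothetical "app"
// label (minDomains is only honored with whenUnsatisfiable=DoNotSchedule).

package examples

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func zoneSpread() corev1.TopologySpreadConstraint {
	minDomains := int32(3)
	return corev1.TopologySpreadConstraint{
		MaxSkew:           1,                             // per-zone count may exceed the global minimum by at most 1
		TopologyKey:       "topology.kubernetes.io/zone", // each zone is one domain
		WhenUnsatisfiable: corev1.DoNotSchedule,
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		MinDomains: &minDomains, // fewer than 3 eligible zones => global minimum treated as 0
	}
}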
The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will + // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + // +listType=atomic + // +optional + repeated string matchLabelKeys = 8; } // TypedLocalObjectReference contains enough information to let you locate the @@ -5458,14 +6338,35 @@ message TypedLocalObjectReference { optional string name = 3; } +message TypedObjectReference { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; + + // Namespace is the namespace of resource being referenced + // Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + // (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + // +featureGate=CrossNamespaceVolumeDataSource + // +optional + optional string namespace = 4; +} + // Volume represents a named volume in a pod that may be accessed by any container in the pod. message Volume { - // Volume's name. + // name of the volume. // Must be a DNS_LABEL and unique within the pod. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names optional string name = 1; - // VolumeSource represents the location and type of the mounted volume. + // volumeSource represents the location and type of the mounted volume. // If not specified, the Volume is implied to be an EmptyDir. // This implied behavior is deprecated and will be removed in a future version. optional VolumeSource volumeSource = 2; @@ -5490,6 +6391,27 @@ message VolumeMount { // +optional optional bool readOnly = 2; + // RecursiveReadOnly specifies whether read-only mounts should be handled + // recursively. + // + // If ReadOnly is false, this field has no meaning and must be unspecified. + // + // If ReadOnly is true, and this field is set to Disabled, the mount is not made + // recursively read-only. If this field is set to IfPossible, the mount is made + // recursively read-only, if it is supported by the container runtime. If this + // field is set to Enabled, the mount is made recursively read-only if it is + // supported by the container runtime, otherwise the pod will not be started and + // an error will be generated to indicate the reason. + // + // If this field is set to IfPossible or Enabled, MountPropagation must be set to + // None (or be unspecified, which defaults to None). + // + // If this field is not specified, it is treated as an equivalent of Disabled. + // + // +featureGate=RecursiveReadOnlyMounts + // +optional + optional string recursiveReadOnly = 7; + // Path within the container at which the volume should be mounted. Must // not contain ':'. optional string mountPath = 3; @@ -5503,6 +6425,8 @@ message VolumeMount { // to container and the other way around. 
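// The recursiveReadOnly rules above reduce to: ReadOnly must be true and
// MountPropagation must stay None/unset. A sketch with a hypothetical volume name,
// using the vendored types (RecursiveReadOnlyMounts feature gate):

package examples

import corev1 "k8s.io/api/core/v1"

func recursiveReadOnlyMount() corev1.VolumeMount {
	mode := corev1.RecursiveReadOnlyIfPossible // fall back to a plain ro mount if the runtime can't do it
	return corev1.VolumeMount{
		Name:              "data",
		MountPath:         "/data",
		ReadOnly:          true, // required whenever RecursiveReadOnly is set
		RecursiveReadOnly: &mode,
	}
}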
// When not set, MountPropagationNone is used. // This field is beta in 1.10. + // When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + // (which defaults to None). // +optional optional string mountPropagation = 5; @@ -5514,35 +6438,89 @@ message VolumeMount { optional string subPathExpr = 6; } +// VolumeMountStatus shows status of volume mounts. +message VolumeMountStatus { + // Name corresponds to the name of the original VolumeMount. + optional string name = 1; + + // MountPath corresponds to the original VolumeMount. + optional string mountPath = 2; + + // ReadOnly corresponds to the original VolumeMount. + // +optional + optional bool readOnly = 3; + + // RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). + // An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, + // depending on the mount result. + // +featureGate=RecursiveReadOnlyMounts + // +optional + optional string recursiveReadOnly = 4; +} + // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. message VolumeNodeAffinity { - // Required specifies hard node constraints that must be met. + // required specifies hard node constraints that must be met. optional NodeSelector required = 1; } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. message VolumeProjection { - // information about the secret data to project + // secret information about the secret data to project // +optional optional SecretProjection secret = 1; - // information about the downwardAPI data to project + // downwardAPI information about the downwardAPI data to project // +optional optional DownwardAPIProjection downwardAPI = 2; - // information about the configMap data to project + // configMap information about the configMap data to project // +optional optional ConfigMapProjection configMap = 3; - // information about the serviceAccountToken data to project + // serviceAccountToken is information about the serviceAccountToken data to project // +optional optional ServiceAccountTokenProjection serviceAccountToken = 4; + + // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + // of ClusterTrustBundle objects in an auto-updating file. + // + // Alpha, gated by the ClusterTrustBundleProjection feature gate. + // + // ClusterTrustBundle objects can either be selected by name, or by the + // combination of signer name and a label selector. + // + // Kubelet performs aggressive normalization of the PEM contents written + // into the pod filesystem. Esoteric PEM features such as inter-block + // comments and block headers are stripped. Certificates are deduplicated. + // The ordering of certificates within the file is arbitrary, and Kubelet + // may change the order over time. + // + // +featureGate=ClusterTrustBundleProjection + // +optional + optional ClusterTrustBundleProjection clusterTrustBundle = 5; +} + +// VolumeResourceRequirements describes the storage resource requirements for a volume. +message VolumeResourceRequirements { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + map limits = 1; + + // Requests describes the minimum amount of compute resources required. 
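// Of the projection sources listed above, serviceAccountToken is the one most
// workloads reach for; a sketch of a projected volume carrying a short-lived,
// audience-bound token (audience and volume name are hypothetical).

package examples

import corev1 "k8s.io/api/core/v1"

func boundTokenVolume() corev1.Volume {
	expiry := int64(3600) // kubelet starts rotating once ~80% of this lifetime has passed
	return corev1.Volume{
		Name: "api-token",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Audience:          "https://backend.example.com",
						ExpirationSeconds: &expiry,
						Path:              "token",
					},
				}},
			},
		},
	}
}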
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. Requests cannot exceed Limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + map requests = 2; } // Represents the source of a volume to mount. // Only one of its members may be specified. message VolumeSource { - // HostPath represents a pre-existing file or directory on the host + // hostPath represents a pre-existing file or directory on the host // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. @@ -5553,131 +6531,131 @@ message VolumeSource { // +optional optional HostPathVolumeSource hostPath = 1; - // EmptyDir represents a temporary directory that shares a pod's lifetime. + // emptyDir represents a temporary directory that shares a pod's lifetime. // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional optional EmptyDirVolumeSource emptyDir = 2; - // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; - // GitRepo represents a git repository at a particular revision. + // gitRepo represents a git repository at a particular revision. // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. // +optional optional GitRepoVolumeSource gitRepo = 5; - // Secret represents a secret that should populate this volume. + // secret represents a secret that should populate this volume. // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional optional SecretVolumeSource secret = 6; - // NFS represents an NFS mount on the host that shares a pod's lifetime + // nfs represents an NFS mount on the host that shares a pod's lifetime // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional optional NFSVolumeSource nfs = 7; - // ISCSI represents an ISCSI Disk resource that is attached to a + // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional optional ISCSIVolumeSource iscsi = 8; - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
// More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; - // PersistentVolumeClaimVolumeSource represents a reference to a + // persistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; - // FlexVolume represents a generic volume resource that is + // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional optional FlexVolumeSource flexVolume = 12; - // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // cinder represents a cinder volume attached and mounted on kubelets host machine. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional optional CephFSVolumeSource cephfs = 14; - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running // +optional optional FlockerVolumeSource flocker = 15; - // DownwardAPI represents downward API about the pod that should populate this volume + // downwardAPI represents downward API about the pod that should populate this volume // +optional optional DownwardAPIVolumeSource downwardAPI = 16; - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional optional FCVolumeSource fc = 17; - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional optional AzureFileVolumeSource azureFile = 18; - // ConfigMap represents a configMap that should populate this volume + // configMap represents a configMap that should populate this volume // +optional optional ConfigMapVolumeSource configMap = 19; - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional optional QuobyteVolumeSource quobyte = 21; - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
// +optional optional AzureDiskVolumeSource azureDisk = 22; - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; - // Items for all in one resources secrets, configmaps, and downward API + // projected items for all in one resources secrets, configmaps, and downward API optional ProjectedVolumeSource projected = 26; - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine // +optional optional PortworxVolumeSource portworxVolume = 24; - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional optional ScaleIOVolumeSource scaleIO = 25; - // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional optional StorageOSVolumeSource storageos = 27; - // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). // +optional optional CSIVolumeSource csi = 28; - // Ephemeral represents a volume that is handled by a cluster storage driver. + // ephemeral represents a volume that is handled by a cluster storage driver. // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, // and deleted when the pod is removed. // @@ -5704,24 +6682,42 @@ message VolumeSource { // // +optional optional EphemeralVolumeSource ephemeral = 29; + + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). 
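// The image volume described above (and continued below) is wired up as just another
// VolumeSource member; a sketch with a hypothetical OCI reference, usable only where
// the alpha ImageVolume feature gate and the container runtime support it.

package examples

import corev1 "k8s.io/api/core/v1"

func ociArtifactVolume() corev1.Volume {
	return corev1.Volume{
		Name: "models",
		VolumeSource: corev1.VolumeSource{
			Image: &corev1.ImageVolumeSource{
				Reference:  "registry.example.com/models/demo:v1",
				PullPolicy: corev1.PullIfNotPresent, // pull only when not already on disk
			},
		},
	}
}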
+ // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + optional ImageVolumeSource image = 30; } // Represents a vSphere volume resource. message VsphereVirtualDiskVolumeSource { - // Path that identifies vSphere volume vmdk + // volumePath is the path that identifies vSphere volume vmdk optional string volumePath = 1; - // Filesystem type to mount. + // fsType is filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional optional string fsType = 2; - // Storage Policy Based Management (SPBM) profile name. + // storagePolicyName is the storage Policy Based Management (SPBM) profile name. // +optional optional string storagePolicyName = 3; - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. // +optional optional string storagePolicyID = 4; } @@ -5756,12 +6752,9 @@ message WindowsSecurityContextOptions { optional string runAsUserName = 3; // HostProcess determines if a container should be run as a 'Host Process' container. - // This field is alpha-level and will only be honored by components that enable the - // WindowsHostProcessContainers feature flag. Setting this field without the feature - // flag will result in errors when validating the Pod. All of a Pod's containers must - // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - // containers and non-HostProcess containers). In addition, if HostProcess is true - // then HostNetwork must also be set to true. + // All of a Pod's containers must have the same effective HostProcess value + // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + // In addition, if HostProcess is true then HostNetwork must also be set to true. // +optional optional bool hostProcess = 4; } diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go index 9341abf89..e803d518b 100644 --- a/vendor/k8s.io/api/core/v1/toleration.go +++ b/vendor/k8s.io/api/core/v1/toleration.go @@ -28,15 +28,13 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { // ToleratesTaint checks if the toleration tolerates the taint. // The matching follows the rules below: -// (1) Empty toleration.effect means to match all taint effects, // -// otherwise taint effect must equal to toleration.effect. -// -// (2) If toleration.operator is 'Exists', it means to match all taint values. -// (3) Empty toleration.key means to match all taint keys. -// -// If toleration.key is empty, toleration.operator must be 'Exists'; -// this combination means to match all taint values and all taint keys. +// 1. Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// 2. If toleration.operator is 'Exists', it means to match all taint values. +// 3. Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. 
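// Before the (partially shown) implementation below, a sketch of how rules 2 and 3
// combine in practice: an empty key with operator Exists tolerates every taint key
// and value, so only the effect still has to match.

package examples

import corev1 "k8s.io/api/core/v1"

func toleratesAnyNoSchedule(taint corev1.Taint) bool {
	t := corev1.Toleration{
		Operator: corev1.TolerationOpExists, // match all taint values
		Effect:   corev1.TaintEffectNoSchedule,
		// Key left empty: match all taint keys (only legal with Exists).
	}
	return t.ToleratesTaint(&taint)
}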
func (t *Toleration) ToleratesTaint(taint *Taint) bool { if len(t.Effect) > 0 && t.Effect != taint.Effect { return false diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 1a03afd2c..3a74138ba 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -34,11 +34,11 @@ const ( // Volume represents a named volume in a pod that may be accessed by any container in the pod. type Volume struct { - // Volume's name. + // name of the volume. // Must be a DNS_LABEL and unique within the pod. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // VolumeSource represents the location and type of the mounted volume. + // volumeSource represents the location and type of the mounted volume. // If not specified, the Volume is implied to be an EmptyDir. // This implied behavior is deprecated and will be removed in a future version. VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` @@ -47,7 +47,7 @@ type Volume struct { // Represents the source of a volume to mount. // Only one of its members may be specified. type VolumeSource struct { - // HostPath represents a pre-existing file or directory on the host + // hostPath represents a pre-existing file or directory on the host // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. @@ -57,104 +57,104 @@ type VolumeSource struct { // mount host directories as read/write. // +optional HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` - // EmptyDir represents a temporary directory that shares a pod's lifetime. + // emptyDir represents a temporary directory that shares a pod's lifetime. // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` - // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` - // GitRepo represents a git repository at a particular revision. + // gitRepo represents a git repository at a particular revision. // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. // +optional GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` - // Secret represents a secret that should populate this volume. 
+ // secret represents a secret that should populate this volume. // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` - // NFS represents an NFS mount on the host that shares a pod's lifetime + // nfs represents an NFS mount on the host that shares a pod's lifetime // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` - // ISCSI represents an ISCSI Disk resource that is attached to a + // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://examples.k8s.io/volumes/iscsi/README.md // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` - // PersistentVolumeClaimVolumeSource represents a reference to a + // persistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` - // FlexVolume represents a generic volume resource that is + // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // cinder represents a cinder volume attached and mounted on kubelets host machine. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` - // DownwardAPI represents downward API about the pod that should populate this volume + // downwardAPI represents downward API about the pod that should populate this volume // +optional DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` - // ConfigMap represents a configMap that should populate this volume + // configMap represents a configMap that should populate this volume // +optional ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. // +optional AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` - // Items for all in one resources secrets, configmaps, and downward API + // projected items for all in one resources secrets, configmaps, and downward API Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine // +optional PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. 
// +optional ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` - // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` - // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). // +optional CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` - // Ephemeral represents a volume that is handled by a cluster storage driver. + // ephemeral represents a volume that is handled by a cluster storage driver. // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, // and deleted when the pod is removed. // @@ -181,6 +181,23 @@ type VolumeSource struct { // // +optional Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"` + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -188,10 +205,10 @@ type VolumeSource struct { // PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another // type of volume that is owned by someone else (the system). type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ // claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` - // Will force the ReadOnly setting in VolumeMounts. + // readOnly Will force the ReadOnly setting in VolumeMounts. // Default false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` @@ -200,85 +217,85 @@ type PersistentVolumeClaimVolumeSource struct { // PersistentVolumeSource is similar to VolumeSource but meant for the // administrator who creates PVs. Exactly one of its members must be set. type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` - // HostPath represents a directory on the host. + // hostPath represents a directory on the host. // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // +optional HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` - // Glusterfs represents a Glusterfs volume that is attached to a host and + // glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` - // NFS represents an NFS mount on the host. Provisioned by an admin. + // nfs represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` - // ISCSI represents an ISCSI Disk resource that is attached to a + // iscsi represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
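For illustration, a minimal sketch of the new image volume source documented earlier in this hunk (gated by ImageVolume). The ImageVolumeSource field names Reference and PullPolicy, and the registry reference used here, are assumptions not shown in this excerpt.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Pod-level volume backed by an OCI image/artifact; content is mounted read-only.
	vol := corev1.Volume{
		Name: "models",
		VolumeSource: corev1.VolumeSource{
			Image: &corev1.ImageVolumeSource{
				// Reference and PullPolicy are assumed field names for this API version.
				Reference:  "registry.example.com/models/llm:v1", // hypothetical artifact reference
				PullPolicy: corev1.PullIfNotPresent,
			},
		},
	}

	// subPath mounts are not supported for this volume type; mount the whole directory.
	mount := corev1.VolumeMount{Name: "models", MountPath: "/models"}
	fmt.Println(vol.Name, mount.MountPath)
}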
// +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // cinder represents a cinder volume attached and mounted on kubelets host machine. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime // +optional CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` - // FlexVolume represents a generic volume resource that is + // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // azureFile represents an Azure File Service mount on the host and bind mount to the pod. // +optional AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
// +optional AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine // +optional PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` - // Local represents directly-attached storage with node affinity + // local represents directly-attached storage with node affinity // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // CSI represents storage that is handled by an external CSI driver (Beta feature). + // csi represents storage that is handled by an external CSI driver (Beta feature). // +optional CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } @@ -295,6 +312,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. @@ -306,13 +324,13 @@ type PersistentVolume struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines a specification of a persistent volume owned by the cluster. + // spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes // +optional Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status represents the current information/status for the persistent volume. + // status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes @@ -322,51 +340,64 @@ type PersistentVolume struct { // PersistentVolumeSpec is the specification of a persistent volume. type PersistentVolumeSpec struct { - // A description of the persistent volume's resources and capacity. + // capacity is the description of the persistent volume's resources and capacity. 
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // The actual volume backing the persistent volume. + // persistentVolumeSource is the actual volume backing the persistent volume. PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` - // AccessModes contains all ways the volume can be mounted. + // accessModes contains all ways the volume can be mounted. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes // +optional + // +listType=atomic AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding // +optional + // +structType=granular ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` - // What happens to a persistent volume when released from its claim. + // persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. // Valid options are Retain (default for manually created PersistentVolumes), Delete (default // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). // Recycle must be supported by the volume plugin underlying this PersistentVolume. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming // +optional PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` - // Name of StorageClass to which this persistent volume belongs. Empty value + // storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value // means that this volume does not belong to any StorageClass. // +optional StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will // simply fail if one is invalid. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional + // +listType=atomic MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` - // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // nodeAffinity defines constraints that limit what nodes this volume can be accessed from. // This field influences the scheduling of pods that use this volume. 
// +optional NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"` + // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value + // is not allowed. When this field is not set, it indicates that this volume does not belong to any + // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver + // after a volume has been updated successfully to a new class. + // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound + // PersistentVolumeClaims during the binding process. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). + // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. type VolumeNodeAffinity struct { - // Required specifies hard node constraints that must be met. + // required specifies hard node constraints that must be met. Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"` } @@ -399,20 +430,25 @@ const ( // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // phase indicates if a volume is available, bound to a claim, or released by a claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase // +optional Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` - // A human-readable message indicating details about why the volume is in this state. + // message is a human-readable message indicating details about why the volume is in this state. // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` - // Reason is a brief CamelCase string that describes any failure and is meant + // reason is a brief CamelCase string that describes any failure and is meant // for machine parsing and tidy display in the CLI. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // lastPhaseTransitionTime is the time the phase transitioned from one to another + // and automatically resets to current time everytime a volume phase transitions. + // +optional + LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeList is a list of PersistentVolume items. type PersistentVolumeList struct { @@ -421,13 +457,14 @@ type PersistentVolumeList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of persistent volumes. + // items is a list of persistent volumes. 
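For illustration, a minimal sketch of a PersistentVolume using the spec fields annotated in this hunk (capacity, accessModes, reclaim policy, storageClassName, mountOptions, and the beta volumeAttributesClassName). The NFS endpoint, class names, and sizes are illustrative values only.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	vac := "gold" // hypothetical VolumeAttributesClass; needs the VolumeAttributesClass gate
	pv := corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-example"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
			AccessModes:                   []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimRetain,
			StorageClassName:              "fast", // hypothetical StorageClass name
			MountOptions:                  []string{"ro", "soft"},
			VolumeAttributesClassName:     &vac,
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				NFS: &corev1.NFSVolumeSource{Server: "nfs.example.com", Path: "/exports/data"},
			},
		},
	}
	fmt.Println(pv.Name)
}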
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { @@ -437,12 +474,12 @@ type PersistentVolumeClaim struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the desired characteristics of a volume requested by a pod author. + // spec defines the desired characteristics of a volume requested by a pod author. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status represents the current information/status of a persistent volume claim. + // status represents the current information/status of a persistent volume claim. // Read-only. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional @@ -450,6 +487,7 @@ type PersistentVolumeClaim struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { @@ -458,7 +496,7 @@ type PersistentVolumeClaimList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // A list of persistent volume claims. + // items is a list of persistent volume claims. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -466,24 +504,25 @@ type PersistentVolumeClaimList struct { // PersistentVolumeClaimSpec describes the common attributes of storage devices // and allows a Source for provider-specific attributes type PersistentVolumeClaimSpec struct { - // AccessModes contains the desired access modes the volume should have. + // accessModes contains the desired access modes the volume should have. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // A label query over volumes to consider for binding. + // selector is a label query over volumes to consider for binding. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - // Resources represents the minimum resources the volume should have. + // resources represents the minimum resources the volume should have. // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements // that are lower than previous value but must still be higher than capacity recorded in the // status field of the claim. 
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` - // VolumeName is the binding reference to the PersistentVolume backing this claim. + Resources VolumeResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` - // Name of the StorageClass required by the claim. + // storageClassName is the name of the StorageClass required by the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` @@ -491,122 +530,283 @@ type PersistentVolumeClaimSpec struct { // Value of Filesystem is implied when not included in claim spec. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` - // This field can be used to specify either: + // dataSource field can be used to specify either: // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) // * An existing PVC (PersistentVolumeClaim) // If the provisioner or an external controller can support the specified data source, // it will create a new volume based on the contents of the specified data source. - // If the AnyVolumeDataSource feature gate is enabled, this field will always have - // the same contents as the DataSourceRef field. + // When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + // and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + // If the namespace is specified, then dataSourceRef will not be copied to dataSource. // +optional DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` - // Specifies the object from which to populate the volume with data, if a non-empty - // volume is desired. This may be any local object from a non-empty API group (non + // dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + // volume is desired. This may be any object from a non-empty API group (non // core object) or a PersistentVolumeClaim object. // When this field is specified, volume binding will only succeed if the type of // the specified object matches some installed volume populator or dynamic // provisioner. - // This field will replace the functionality of the DataSource field and as such + // This field will replace the functionality of the dataSource field and as such // if both fields are non-empty, they must have the same value. For backwards - // compatibility, both fields (DataSource and DataSourceRef) will be set to the same + // compatibility, when namespace isn't specified in dataSourceRef, + // both fields (dataSource and dataSourceRef) will be set to the same // value automatically if one of them is empty and the other is non-empty. - // There are two important differences between DataSource and DataSourceRef: - // * While DataSource only allows two specific types of objects, DataSourceRef + // When namespace is specified in dataSourceRef, + // dataSource isn't set to the same value and must be empty. 
+ // There are three important differences between dataSource and dataSourceRef: + // * While dataSource only allows two specific types of objects, dataSourceRef // allows any non-core object, as well as PersistentVolumeClaim objects. - // * While DataSource ignores disallowed values (dropping them), DataSourceRef + // * While dataSource ignores disallowed values (dropping them), dataSourceRef // preserves all values, and generates an error if a disallowed value is // specified. - // (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // * While dataSource only allows local objects, dataSourceRef allows objects + // in any namespaces. + // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + // +optional + DataSourceRef *TypedObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` + // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + // If specified, the CSI driver will create or update the volume with the attributes defined + // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + // will be set by the persistentvolume controller if it exists. + // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + // exists. + // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). + // +featureGate=VolumeAttributesClass + // +optional + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` +} + +type TypedObjectReference struct { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. // +optional - DataSourceRef *TypedLocalObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` + APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace is the namespace of resource being referenced + // Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + // (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
+ // +featureGate=CrossNamespaceVolumeDataSource + // +optional + Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` } -// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type +// PersistentVolumeClaimConditionType defines the condition of PV claim. +// Valid values are: +// - "Resizing", "FileSystemResizePending" +// +// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected: +// - "ControllerResizeError", "NodeResizeError" +// +// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected: +// - "ModifyVolumeError", "ModifyingVolume" type PersistentVolumeClaimConditionType string +// These are valid conditions of PVC const ( // PersistentVolumeClaimResizing - a user trigger resize of pvc has been started PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" + + // PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller + PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError" + // PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node. + PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError" + + // Applying the target VolumeAttributesClass encountered an error + PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" + // Volume is being modified + PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume" ) // +enum -type PersistentVolumeClaimResizeStatus string +// When a controller receives persistentvolume claim update with ClaimResourceStatus for a resource +// that it does not recognizes, then it should ignore that update and let other controllers +// handle it. +type ClaimResourceStatus string const ( - // When expansion is complete, the empty string is set by resize controller or kubelet. - PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = "" - // State set when resize controller starts expanding the volume in control-plane - PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress" - // State set when expansion has failed in resize controller with a terminal error. - // Transient errors such as timeout should not set this status and should leave ResizeStatus + // State set when resize controller starts resizing the volume in control-plane. + PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress" + + // State set when resize has failed in resize controller with a terminal unrecoverable error. + // Transient errors such as timeout should not set this status and should leave allocatedResourceStatus // unmodified, so as resize controller can resume the volume expansion. - PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed" - // State set when resize controller has finished expanding the volume but further expansion is needed on the node. 
- PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending" - // State set when kubelet starts expanding the volume. - PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress" - // State set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed. - PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed" + PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible" + + // State set when resize controller has finished resizing the volume but further resizing of volume + // is needed on the node. + PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending" + // State set when kubelet starts resizing the volume. + PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress" + // State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors + // shouldn't set this status + PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible" +) + +// +enum +// New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately +type PersistentVolumeClaimModifyVolumeStatus string + +const ( + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing + PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending" + // InProgress indicates that the volume is being modified + PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress" + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified + PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible" ) -// PersistentVolumeClaimCondition contails details about state of pvc +// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation +type ModifyVolumeStatus struct { + // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled + TargetVolumeAttributesClassName string `json:"targetVolumeAttributesClassName,omitempty" protobuf:"bytes,1,opt,name=targetVolumeAttributesClassName"` + // status is the status of the ControllerModifyVolume operation. It can be in any of following states: + // - Pending + // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as + // the specified VolumeAttributesClass not existing. + // - InProgress + // InProgress indicates that the volume is being modified. + // - Infeasible + // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To + // resolve the error, a valid VolumeAttributesClass needs to be specified. + // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. 
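For illustration, a consumer-side sketch of the ModifyVolumeStatus object and its three documented states; the pvc.Status.ModifyVolumeStatus field it reads is added further down in this hunk. Per the API comment, unknown statuses should be treated as a failure case.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// describeModifyVolume reports the state of an in-flight ControllerModifyVolume operation.
func describeModifyVolume(pvc *corev1.PersistentVolumeClaim) string {
	mvs := pvc.Status.ModifyVolumeStatus
	if mvs == nil {
		return "no ModifyVolume operation in progress"
	}
	switch mvs.Status {
	case corev1.PersistentVolumeClaimModifyVolumePending:
		return "pending: requirements for " + mvs.TargetVolumeAttributesClassName + " not yet met"
	case corev1.PersistentVolumeClaimModifyVolumeInProgress:
		return "modifying volume to class " + mvs.TargetVolumeAttributesClassName
	case corev1.PersistentVolumeClaimModifyVolumeInfeasible:
		return "rejected by the CSI driver; a valid VolumeAttributesClass must be specified"
	default:
		return fmt.Sprintf("unknown ModifyVolume status %q", string(mvs.Status))
	}
}

func main() {
	fmt.Println(describeModifyVolume(&corev1.PersistentVolumeClaim{}))
}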
+ Status PersistentVolumeClaimModifyVolumeStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=PersistentVolumeClaimModifyVolumeStatus"` +} + +// PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time we probed the condition. + // lastProbeTime is the time we probed the condition. // +optional LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transitioned from one status to another. + // lastTransitionTime is the time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // Unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // reason is a unique, this should be a short, machine understandable string that gives the reason + // for condition's last transition. If it reports "Resizing" that means the underlying // persistent volume is being resized. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human-readable message indicating details about last transition. + // message is the human-readable message indicating details about last transition. // +optional Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim. + // phase represents the current phase of PersistentVolumeClaim. // +optional Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` - // AccessModes contains the actual access modes the volume backing the PVC has. + // accessModes contains the actual access modes the volume backing the PVC has. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 // +optional + // +listType=atomic AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // Represents the actual resources of the underlying volume. + // capacity represents the actual resources of the underlying volume. // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // Current Condition of persistent volume claim. If underlying persistent volume is being - // resized then the Condition will be set to 'ResizeStarted'. + // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being + // resized then the Condition will be set to 'Resizing'. // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` - // The storage resource within AllocatedResources tracks the capacity allocated to a PVC. 
It may - // be larger than the actual capacity when a volume expansion operation is requested. + // allocatedResources tracks the resources allocated to a PVC including its capacity. + // Key names follow standard Kubernetes label syntax. Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // Capacity reported here may be larger than the actual capacity when a volume expansion operation + // is requested. // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. // If a volume expansion capacity request is lowered, allocatedResources is only // lowered if there are no expansion operations in progress and if the actual volume capacity // is equal or lower than the requested capacity. + // + // A controller that receives PVC update with previously unknown resourceName + // should ignore the update for the purpose it was designed. For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure // +optional AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` - // ResizeStatus stores status of resize operation. - // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty - // string by resize controller or kubelet. + + // resizestatus is tombstoned since the field was replaced by allocatedResourceStatus + // ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"` + + // allocatedResourceStatuses stores status of resource being resized for the given PVC. + // Key names follow standard Kubernetes label syntax. Valid values are either: + // * Un-prefixed keys: + // - storage - the capacity of the volume. + // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource" + // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered + // reserved and hence may not be used. + // + // ClaimResourceStatus can be in any of following states: + // - ControllerResizeInProgress: + // State set when resize controller starts resizing the volume in control-plane. + // - ControllerResizeFailed: + // State set when resize has failed in resize controller with a terminal error. + // - NodeResizePending: + // State set when resize controller has finished resizing the volume but further resizing of + // volume is needed on the node. + // - NodeResizeInProgress: + // State set when kubelet starts resizing the volume. + // - NodeResizeFailed: + // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set + // NodeResizeFailed. 
+ // For example: if expanding a PVC for more capacity - this field can be one of the following states: + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress" + // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed" + // When this field is not set, it means that no resize operation is in progress for the given PVC. + // + // A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus + // should ignore the update for the purpose it was designed. For example - a controller that + // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid + // resources associated with PVC. + // // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature. // +featureGate=RecoverVolumeExpansionFailure + // +mapType=granular + // +optional + AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` + // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. + // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). + // +featureGate=VolumeAttributesClass // +optional - ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"` + CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` + // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. + // When this is unset, there is no ModifyVolume operation being attempted. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). + // +featureGate=VolumeAttributesClass + // +optional + ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` } // +enum @@ -684,11 +884,11 @@ const ( // Represents a host path mapped into a pod. // Host path volumes do not support ownership management or SELinux relabeling. type HostPathVolumeSource struct { - // Path of the directory on the host. + // path of the directory on the host. // If the path is a symlink, it will follow the link to the real path. // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - // Type for HostPath Volume + // type for HostPath Volume // Defaults to "" // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath // +optional @@ -698,18 +898,18 @@ type HostPathVolumeSource struct { // Represents an empty directory for a pod. // Empty directory volumes support ownership management and SELinux relabeling. type EmptyDirVolumeSource struct { - // What type of storage medium should back this directory. + // medium represents what type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. 
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` - // Total amount of local storage required for this EmptyDir volume. + // sizeLimit is the total amount of local storage required for this EmptyDir volume. // The size limit is also applicable for memory medium. // The maximum usage on memory medium EmptyDir would be the minimum value between // the SizeLimit specified here and the sum of memory limits of all containers in a pod. // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } @@ -717,15 +917,15 @@ type EmptyDirVolumeSource struct { // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { - // EndpointsName is the endpoint name that details Glusterfs topology. + // endpoints is the endpoint name that details Glusterfs topology. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` - // Path is the Glusterfs volume path. + // path is the Glusterfs volume path. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // readOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional @@ -735,21 +935,21 @@ type GlusterfsVolumeSource struct { // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsPersistentVolumeSource struct { - // EndpointsName is the endpoint name that details Glusterfs topology. + // endpoints is the endpoint name that details Glusterfs topology. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` - // Path is the Glusterfs volume path. + // path is the Glusterfs volume path. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // readOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // endpointsNamespace is the namespace that contains Glusterfs endpoint. // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. 
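For illustration, a minimal memory-backed EmptyDir using the medium and sizeLimit fields documented above; the StorageMediumMemory constant is assumed from the same package, and the 256Mi cap is an arbitrary example value.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Effective cap is the lower of sizeLimit and the sum of the pod's container memory limits.
	limit := resource.MustParse("256Mi")
	scratch := corev1.Volume{
		Name: "scratch",
		VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{
				Medium:    corev1.StorageMediumMemory, // assumed constant for the "Memory" medium
				SizeLimit: &limit,
			},
		},
	}
	fmt.Println(scratch.Name)
}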
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod // +optional @@ -759,41 +959,45 @@ type GlusterfsPersistentVolumeSource struct { // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { - // A collection of Ceph monitors. + // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // The rados image name. + // image is the rados image name. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // The rados pool name. + // pool is the rados pool name. // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` - // The rados user name. + // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` - // Keyring is the path to key ring for RBDUser. + // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` - // SecretRef is name of the authentication secret for RBDUser. If provided + // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional @@ -803,41 +1007,45 @@ type RBDVolumeSource struct { // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDPersistentVolumeSource struct { - // A collection of Ceph monitors. + // monitors is a collection of Ceph monitors. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +listType=atomic CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // The rados image name. + // image is the rados image name. 
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // The rados pool name. + // pool is the rados pool name. // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` - // The rados user name. + // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` - // Keyring is the path to key ring for RBDUser. + // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` - // SecretRef is name of the authentication secret for RBDUser. If provided + // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional @@ -849,21 +1057,21 @@ type RBDPersistentVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { - // volume id used to identify the volume in cinder. + // volumeID used to identify the volume in cinder. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // Optional: points to a secret object containing parameters used to connect + // secretRef is optional: points to a secret object containing parameters used to connect // to OpenStack. 
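For illustration, a sketch of an RBD volume source relying on the defaults documented above ("rbd" pool, "admin" user, /etc/ceph/keyring); monitor addresses, image, and secret names are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Only monitors and image are required; pool, user and keyring fall back to their defaults.
	rbd := corev1.RBDVolumeSource{
		CephMonitors: []string{"10.0.0.1:6789", "10.0.0.2:6789"},
		RBDImage:     "database",
		FSType:       "ext4",
		SecretRef:    &corev1.LocalObjectReference{Name: "ceph-secret"}, // overrides Keyring when set
		ReadOnly:     true,
	}
	fmt.Println(rbd.RBDImage)
}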
// +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` @@ -874,21 +1082,21 @@ type CinderVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderPersistentVolumeSource struct { - // volume id used to identify the volume in cinder. + // volumeID used to identify the volume in cinder. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type to mount. + // fsType Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // Optional: points to a secret object containing parameters used to connect + // secretRef is Optional: points to a secret object containing parameters used to connect // to OpenStack. // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` @@ -897,25 +1105,26 @@ type CinderPersistentVolumeSource struct { // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors + // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` - // Optional: User is the rados user name, default is admin + // user is optional: User is the rados user name, default is admin // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` - // Optional: Defaults to false (read/write). 
ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional @@ -926,10 +1135,10 @@ type CephFSVolumeSource struct { // in any namespace // +structType=atomic type SecretReference struct { - // Name is unique within a namespace to reference a secret resource. + // name is unique within a namespace to reference a secret resource. // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Namespace defines the space within which the secret name must be unique. + // namespace defines the space within which the secret name must be unique. // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` } @@ -937,25 +1146,26 @@ type SecretReference struct { // Represents a Ceph Filesystem mount that lasts the lifetime of a pod // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSPersistentVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors + // monitors is Required: Monitors is a collection of Ceph monitors // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +listType=atomic Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` - // Optional: User is the rados user name, default is admin + // user is Optional: User is the rados user name, default is admin // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it // +optional @@ -966,11 +1176,11 @@ type CephFSPersistentVolumeSource struct { // One and only one of datasetName and datasetUUID should be set. // Flocker volumes do not support ownership management or SELinux relabeling. 
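For illustration of the secret-reference split above: the pod-scoped CephFSVolumeSource resolves its secret through a LocalObjectReference in the pod's own namespace, while CephFSPersistentVolumeSource carries a full SecretReference with an explicit namespace. A minimal sketch, assuming the consumer imports k8s.io/api/core/v1 as corev1; the monitor address and secret names are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Pod-level source: the secret is looked up in the pod's own namespace.
	podLevel := corev1.CephFSVolumeSource{
		Monitors:  []string{"10.16.154.78:6789"},
		Path:      "/",
		User:      "admin",
		SecretRef: &corev1.LocalObjectReference{Name: "ceph-secret"}, // name only
		ReadOnly:  true,
	}

	// PV-level source: the secret reference also names a namespace.
	pvLevel := corev1.CephFSPersistentVolumeSource{
		Monitors: []string{"10.16.154.78:6789"},
		Path:     "/",
		User:     "admin",
		SecretRef: &corev1.SecretReference{
			Name:      "ceph-secret",
			Namespace: "ceph-system",
		},
		ReadOnly: true,
	}

	fmt.Println(podLevel.SecretRef.Name, pvLevel.SecretRef.Namespace)
}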
type FlockerVolumeSource struct { - // Name of the dataset stored as metadata -> name on the dataset for Flocker + // datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker // should be considered as deprecated // +optional DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"` - // UUID of the dataset. This is unique identifier of a Flocker dataset + // datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset // +optional DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"` } @@ -1005,24 +1215,24 @@ const ( // can only be mounted as read/write once or read-only many times. GCE // PDs support ownership management and SELinux relabeling. type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` - // Filesystem type of the volume that you want to mount. + // fsType is filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // The partition in the volume that you want to mount. + // partition is the partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional @@ -1032,30 +1242,30 @@ type GCEPersistentDiskVolumeSource struct { // Represents a Quobyte mount that lasts the lifetime of a pod. // Quobyte volumes do not support ownership management or SELinux relabeling. type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services + // registry represents a single or multiple Quobyte Registry services // specified as a string as host:port pair (multiple entries are separated with commas) // which acts as the central registry for volumes Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"` - // Volume is a string that references an already created Quobyte volume by name. + // volume is a string that references an already created Quobyte volume by name. Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"` - // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // readOnly here will force the Quobyte volume to be mounted with read-only permissions. // Defaults to false. 
// +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // User to map volume access to + // user to map volume access to // Defaults to serivceaccount user // +optional User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"` - // Group to map volume access to + // group to map volume access to // Default is no group // +optional Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` - // Tenant owning the given Quobyte volume in the Backend + // tenant owning the given Quobyte volume in the Backend // Used with dynamically provisioned Quobyte volumes, value is set by the plugin // +optional Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"` @@ -1064,25 +1274,25 @@ type QuobyteVolumeSource struct { // FlexPersistentVolumeSource represents a generic persistent volume resource that is // provisioned/attached using an exec based plugin. type FlexPersistentVolumeSource struct { - // Driver is the name of the driver to use for this volume. + // driver is the name of the driver to use for this volume. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - // Filesystem type to mount. + // fsType is the Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: SecretRef is reference to the secret object containing + // secretRef is Optional: SecretRef is reference to the secret object containing // sensitive information to pass to the plugin scripts. This may be // empty if no secret object is specified. If the secret object // contains more than one secret, all secrets are passed to the plugin // scripts. // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // Optional: Extra command options if any. + // options is Optional: this field holds extra command options if any. // +optional Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` } @@ -1090,25 +1300,25 @@ type FlexPersistentVolumeSource struct { // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. + // driver is the name of the driver to use for this volume. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: SecretRef is reference to the secret object containing + // secretRef is Optional: secretRef is reference to the secret object containing // sensitive information to pass to the plugin scripts. This may be // empty if no secret object is specified. If the secret object // contains more than one secret, all secrets are passed to the plugin // scripts. 
// +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // Optional: Extra command options if any. + // options is Optional: this field holds extra command options if any. // +optional Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` } @@ -1120,24 +1330,23 @@ type FlexVolumeSource struct { // can only be mounted as read/write once. AWS EBS volumes support // ownership management and SELinux relabeling. type AWSElasticBlockStoreVolumeSource struct { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // The partition in the volume that you want to mount. + // partition is the partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). // +optional Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". + // readOnly value true will force the readOnly setting in VolumeMounts. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` @@ -1151,12 +1360,12 @@ type AWSElasticBlockStoreVolumeSource struct { // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. type GitRepoVolumeSource struct { - // Repository URL + // repository is the URL Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` - // Commit hash for the specified revision. + // revision is the commit hash for the specified revision. // +optional Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` - // Target directory name. + // directory is the target directory name. // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the // git repository. Otherwise, if specified, the volume will contain the git repository in // the subdirectory with the given name. @@ -1170,11 +1379,11 @@ type GitRepoVolumeSource struct { // as files using the keys in the Data field as the file names. 
// Secret volumes support ownership management and SELinux relabeling. type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. + // secretName is the name of the secret in the pod's namespace to use. // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` - // If unspecified, each key-value pair in the Data field of the referenced + // items If unspecified, each key-value pair in the Data field of the referenced // Secret will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -1182,8 +1391,9 @@ type SecretVolumeSource struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Optional: mode bits used to set permissions on created files by default. + // defaultMode is Optional: mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values // for mode bits. Defaults to 0644. @@ -1192,7 +1402,7 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or its keys must be defined + // optional field specify whether the Secret or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1209,7 +1419,7 @@ const ( // mode. type SecretProjection struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // If unspecified, each key-value pair in the Data field of the referenced + // items if unspecified, each key-value pair in the Data field of the referenced // Secret will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -1217,8 +1427,9 @@ type SecretProjection struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the Secret or its key must be defined + // optional field specify whether the Secret or its key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1226,16 +1437,15 @@ type SecretProjection struct { // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server. + // server is the hostname or IP address of the NFS server. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Server string `json:"server" protobuf:"bytes,1,opt,name=server"` - // Path that is exported by the NFS server. + // path that is exported by the NFS server. 
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. + // readOnly here will force the NFS export to be mounted with read-only permissions. // Defaults to false. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional @@ -1246,42 +1456,44 @@ type NFSVolumeSource struct { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIVolumeSource struct { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` - // Target iSCSI Qualified Name. + // iqn is the target iSCSI Qualified Name. IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI Target Lun number. + // lun represents iSCSI Target Lun number. Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // iSCSI Interface Name that uses an iSCSI transport. + // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + // portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional + // +listType=atomic Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` - // whether support iSCSI Discovery CHAP authentication + // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication // +optional DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` - // whether support iSCSI Session CHAP authentication + // chapAuthSession defines whether support iSCSI Session CHAP authentication // +optional SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` - // CHAP Secret for iSCSI target and initiator authentication + // secretRef is the CHAP Secret for iSCSI target and initiator authentication // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` - // Custom iSCSI Initiator Name. + // initiatorName is the custom iSCSI Initiator Name. 
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional @@ -1292,42 +1504,44 @@ type ISCSIVolumeSource struct { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIPersistentVolumeSource struct { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` - // Target iSCSI Qualified Name. + // iqn is Target iSCSI Qualified Name. IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI Target Lun number. + // lun is iSCSI Target Lun number. Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // iSCSI Interface Name that uses an iSCSI transport. + // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` - // Filesystem type of the volume that you want to mount. + // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // readOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional + // +listType=atomic Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` - // whether support iSCSI Discovery CHAP authentication + // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication // +optional DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` - // whether support iSCSI Session CHAP authentication + // chapAuthSession defines whether support iSCSI Session CHAP authentication // +optional SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` - // CHAP Secret for iSCSI target and initiator authentication + // secretRef is the CHAP Secret for iSCSI target and initiator authentication // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` - // Custom iSCSI Initiator Name. + // initiatorName is the custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional @@ -1338,35 +1552,37 @@ type ISCSIPersistentVolumeSource struct { // Fibre Channel volumes can only be mounted as read/write once. 
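Taken together, the ISCSIVolumeSource fields above are normally wrapped in a pod Volume. A minimal sketch, again assuming k8s.io/api/core/v1 is imported as corev1; the portal, IQN and CHAP secret name are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	vol := corev1.Volume{
		Name: "iscsi-data",
		VolumeSource: corev1.VolumeSource{
			ISCSI: &corev1.ISCSIVolumeSource{
				TargetPortal: "10.0.2.15:3260",                       // ip_addr:port; port may be omitted for 860/3260
				IQN:          "iqn.2001-04.com.example:storage.lun1", // target iSCSI Qualified Name
				Lun:          0,
				FSType:       "ext4",
				ReadOnly:     false,
				// Session CHAP authentication, resolved via a secret in the pod's namespace.
				SessionCHAPAuth: true,
				SecretRef:       &corev1.LocalObjectReference{Name: "chap-secret"},
			},
		},
	}
	fmt.Printf("%s -> %s\n", vol.Name, vol.VolumeSource.ISCSI.TargetPortal)
}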
// Fibre Channel volumes support ownership management and SELinux relabeling. type FCVolumeSource struct { - // Optional: FC target worldwide names (WWNs) + // targetWWNs is Optional: FC target worldwide names (WWNs) // +optional + // +listType=atomic TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"` - // Optional: FC target lun number + // lun is Optional: FC target lun number // +optional Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // TODO: how do we prevent errors in the filesystem from compromising the machine // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force + // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // Optional: FC volume world wide identifiers (wwids) + // wwids Optional: FC volume world wide identifiers (wwids) // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. // +optional + // +listType=atomic WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"` } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key + // secretName is the name of secret that contains Azure Storage Account Name and Key SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` - // Share Name + // shareName is the azure share Name ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` @@ -1374,15 +1590,15 @@ type AzureFileVolumeSource struct { // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. type AzureFilePersistentVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key + // secretName is the name of secret that contains Azure Storage Account Name and Key SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` - // Share Name + // shareName is the azure Share Name ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // the namespace of the secret that contains Azure Storage Account Name and Key + // secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key // default is the same as the Pod // +optional SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"` @@ -1390,26 +1606,26 @@ type AzureFilePersistentVolumeSource struct { // Represents a vSphere volume resource. 
type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk + // volumePath is the path that identifies vSphere volume vmdk VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"` - // Filesystem type to mount. + // fsType is filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Storage Policy Based Management (SPBM) profile name. + // storagePolicyName is the storage Policy Based Management (SPBM) profile name. // +optional StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"` - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. // +optional StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"` } // Represents a Photon Controller persistent disk resource. type PhotonPersistentDiskVolumeSource struct { - // ID that identifies Photon Controller persistent disk + // pdID is the ID that identifies Photon Controller persistent disk PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` @@ -1433,35 +1649,39 @@ const ( // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. type AzureDiskVolumeSource struct { - // The Name of the data disk in the blob storage + // diskName is the Name of the data disk in the blob storage DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"` - // The URI the data disk in the blob storage + // diskURI is the URI of data disk in the blob storage DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` - // Host Caching mode: None, Read Only, Read Write. + // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` - // Filesystem type to mount. + // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` - // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared + // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` } // PortworxVolumeSource represents a Portworx volume resource. type PortworxVolumeSource struct { - // VolumeID uniquely identifies a Portworx volume + // volumeID uniquely identifies a Portworx volume VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // FSType represents the filesystem type to mount + // fSType represents the filesystem type to mount // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` @@ -1469,36 +1689,38 @@ type PortworxVolumeSource struct { // ScaleIOVolumeSource represents a persistent ScaleIO volume type ScaleIOVolumeSource struct { - // The host address of the ScaleIO API Gateway. + // gateway is the host address of the ScaleIO API Gateway. Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` - // The name of the storage system as configured in ScaleIO. + // system is the name of the storage system as configured in ScaleIO. System string `json:"system" protobuf:"bytes,2,opt,name=system"` - // SecretRef references to the secret for ScaleIO user and other + // secretRef references to the secret for ScaleIO user and other // sensitive information. If this is not provided, Login operation will fail. SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` - // Flag to enable/disable SSL communication with Gateway, default false + // sslEnabled Flag enable/disable SSL communication with Gateway, default false // +optional SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` - // The name of the ScaleIO Protection Domain for the configured storage. + // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. // +optional ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` - // The ScaleIO Storage Pool associated with the protection domain. + // storagePool is the ScaleIO Storage Pool associated with the protection domain. // +optional StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` - // The name of a volume already created in the ScaleIO system + // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` - // Filesystem type to mount. 
+ // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". // +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` @@ -1506,36 +1728,38 @@ type ScaleIOVolumeSource struct { // ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume type ScaleIOPersistentVolumeSource struct { - // The host address of the ScaleIO API Gateway. + // gateway is the host address of the ScaleIO API Gateway. Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` - // The name of the storage system as configured in ScaleIO. + // system is the name of the storage system as configured in ScaleIO. System string `json:"system" protobuf:"bytes,2,opt,name=system"` - // SecretRef references to the secret for ScaleIO user and other + // secretRef references to the secret for ScaleIO user and other // sensitive information. If this is not provided, Login operation will fail. SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` - // Flag to enable/disable SSL communication with Gateway, default false + // sslEnabled is the flag to enable/disable SSL communication with Gateway, default false // +optional SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` - // The name of the ScaleIO Protection Domain for the configured storage. + // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. // +optional ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` - // The ScaleIO Storage Pool associated with the protection domain. + // storagePool is the ScaleIO Storage Pool associated with the protection domain. // +optional StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` - // The name of a volume already created in the ScaleIO system + // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". // Default is "xfs" // +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` @@ -1543,10 +1767,10 @@ type ScaleIOPersistentVolumeSource struct { // Represents a StorageOS persistent volume resource. 
type StorageOSVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume + // volumeName is the human-readable name of the StorageOS volume. Volume // names are only unique within a namespace. VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` - // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // volumeNamespace specifies the scope of the volume within StorageOS. If no // namespace is specified then the Pod's namespace will be used. This allows the // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. // Set VolumeName to any name to override the default behaviour. @@ -1554,16 +1778,16 @@ type StorageOSVolumeSource struct { // Namespaces that do not pre-exist within StorageOS will be created. // +optional VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // SecretRef specifies the secret to use for obtaining the StorageOS API + // secretRef specifies the secret to use for obtaining the StorageOS API // credentials. If not specified, default values will be attempted. // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` @@ -1571,10 +1795,10 @@ type StorageOSVolumeSource struct { // Represents a StorageOS persistent volume resource. type StorageOSPersistentVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume + // volumeName is the human-readable name of the StorageOS volume. Volume // names are only unique within a namespace. VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` - // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // volumeNamespace specifies the scope of the volume within StorageOS. If no // namespace is specified then the Pod's namespace will be used. This allows the // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. // Set VolumeName to any name to override the default behaviour. @@ -1582,16 +1806,16 @@ type StorageOSPersistentVolumeSource struct { // Namespaces that do not pre-exist within StorageOS will be created. // +optional VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force + // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. 
// +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // SecretRef specifies the secret to use for obtaining the StorageOS API + // secretRef specifies the secret to use for obtaining the StorageOS API // credentials. If not specified, default values will be attempted. // +optional SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` @@ -1605,7 +1829,7 @@ type StorageOSPersistentVolumeSource struct { // ConfigMap volumes support ownership management and SELinux relabeling. type ConfigMapVolumeSource struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // If unspecified, each key-value pair in the Data field of the referenced + // items if unspecified, each key-value pair in the Data field of the referenced // ConfigMap will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -1613,8 +1837,9 @@ type ConfigMapVolumeSource struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Optional: mode bits used to set permissions on created files by default. + // defaultMode is optional: mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. // Defaults to 0644. @@ -1623,7 +1848,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or its keys must be defined + // optional specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1641,7 +1866,7 @@ const ( // mode. type ConfigMapProjection struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // If unspecified, each key-value pair in the Data field of the referenced + // items if unspecified, each key-value pair in the Data field of the referenced // ConfigMap will be projected into the volume as a file whose name is the // key and content is the value. If specified, the listed keys will be // projected into the specified paths, and unlisted keys will not be @@ -1649,8 +1874,9 @@ type ConfigMapProjection struct { // the volume setup will error unless it is marked optional. Paths must be // relative and may not contain the '..' path or start with '..'. // +optional + // +listType=atomic Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or its keys must be defined + // optional specify whether the ConfigMap or its keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1660,31 +1886,67 @@ type ConfigMapProjection struct { // the pods runtime filesystem for use against APIs (Kubernetes API Server or // otherwise). type ServiceAccountTokenProjection struct { - // Audience is the intended audience of the token. 
A recipient of a token + // audience is the intended audience of the token. A recipient of a token // must identify itself with an identifier specified in the audience of the // token, and otherwise should reject the token. The audience defaults to the // identifier of the apiserver. - //+optional + // +optional Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"` - // ExpirationSeconds is the requested duration of validity of the service + // expirationSeconds is the requested duration of validity of the service // account token. As the token approaches expiration, the kubelet volume // plugin will proactively rotate the service account token. The kubelet will // start trying to rotate the token if the token is older than 80 percent of // its time to live or if the token is older than 24 hours.Defaults to 1 hour // and must be at least 10 minutes. - //+optional + // +optional ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` - // Path is the path relative to the mount point of the file to project the + // path is the path relative to the mount point of the file to project the // token into. Path string `json:"path" protobuf:"bytes,3,opt,name=path"` } +// ClusterTrustBundleProjection describes how to select a set of +// ClusterTrustBundle objects and project their contents into the pod +// filesystem. +type ClusterTrustBundleProjection struct { + // Select a single ClusterTrustBundle by object name. Mutually-exclusive + // with signerName and labelSelector. + // +optional + Name *string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` + + // Select all ClusterTrustBundles that match this signer name. + // Mutually-exclusive with name. The contents of all selected + // ClusterTrustBundles will be unified and deduplicated. + // +optional + SignerName *string `json:"signerName,omitempty" protobuf:"bytes,2,rep,name=signerName"` + + // Select all ClusterTrustBundles that match this label selector. Only has + // effect if signerName is set. Mutually-exclusive with name. If unset, + // interpreted as "match nothing". If set but empty, interpreted as "match + // everything". + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,3,rep,name=labelSelector"` + + // If true, don't block pod startup if the referenced ClusterTrustBundle(s) + // aren't available. If using name, then the named ClusterTrustBundle is + // allowed not to exist. If using signerName, then the combination of + // signerName and labelSelector is allowed to match zero + // ClusterTrustBundles. + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,5,opt,name=optional"` + + // Relative path from the volume root to write the bundle. + Path string `json:"path" protobuf:"bytes,4,rep,name=path"` +} + // Represents a projected volume source type ProjectedVolumeSource struct { - // list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. // +optional + // +listType=atomic Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` - // Mode bits used to set permissions on created files by default. + // defaultMode are the mode bits used to set permissions on created files by default. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
// Directories within the path are not affected by this setting. @@ -1694,22 +1956,39 @@ type ProjectedVolumeSource struct { DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project + // secret information about the secret data to project // +optional Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` - // information about the downwardAPI data to project + // downwardAPI information about the downwardAPI data to project // +optional DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` - // information about the configMap data to project + // configMap information about the configMap data to project // +optional ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` - // information about the serviceAccountToken data to project + // serviceAccountToken is information about the serviceAccountToken data to project // +optional ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"` + + // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + // of ClusterTrustBundle objects in an auto-updating file. + // + // Alpha, gated by the ClusterTrustBundleProjection feature gate. + // + // ClusterTrustBundle objects can either be selected by name, or by the + // combination of signer name and a label selector. + // + // Kubelet performs aggressive normalization of the PEM contents written + // into the pod filesystem. Esoteric PEM features such as inter-block + // comments and block headers are stripped. Certificates are deduplicated. + // The ordering of certificates within the file is arbitrary, and Kubelet + // may change the order over time. + // + // +featureGate=ClusterTrustBundleProjection + // +optional + ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"` } const ( @@ -1718,15 +1997,15 @@ const ( // Maps a string key to a path within a volume. type KeyToPath struct { - // The key to project. + // key is the key to project. Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // The relative path of the file to map the key to. + // path is the relative path of the file to map the key to. // May not be an absolute path. // May not contain the path element '..'. // May not start with the string '..'. Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - // Optional: mode bits used to set permissions on this file. + // mode is Optional: mode bits used to set permissions on this file. // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. // If not specified, the volume defaultMode will be used. @@ -1738,11 +2017,11 @@ type KeyToPath struct { // Local represents directly-attached storage with node affinity (Beta feature) type LocalVolumeSource struct { - // The full path to the volume on the node. + // path of the full path to the volume on the node. 
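The projected-volume types above compose several per-source projections into a single mount. A minimal sketch of a ProjectedVolumeSource layering a serviceAccountToken and a configMap projection, assuming k8s.io/api/core/v1 is imported as corev1; the audience, object names and paths are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiration := int64(3600) // seconds; the field docs require at least 10 minutes
	mode := int32(0o644)      // defaultMode; YAML accepts octal or decimal, JSON requires decimal

	projected := corev1.ProjectedVolumeSource{
		DefaultMode: &mode,
		Sources: []corev1.VolumeProjection{
			{
				// Exactly one projection field is set per entry.
				ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
					Audience:          "https://issuer.example.com",
					ExpirationSeconds: &expiration,
					Path:              "token",
				},
			},
			{
				ConfigMap: &corev1.ConfigMapProjection{
					LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"},
					Items: []corev1.KeyToPath{
						{Key: "config.yaml", Path: "config/config.yaml"},
					},
				},
			},
		},
	}

	vol := corev1.Volume{
		Name:         "projected-config",
		VolumeSource: corev1.VolumeSource{Projected: &projected},
	}
	fmt.Println(vol.Name, len(vol.VolumeSource.Projected.Sources))
}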
// It can be either a directory or block device (disk, partition, ...). Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - // Filesystem type to mount. + // fsType is the filesystem type to mount. // It applies only when the Path is a block device. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified. @@ -1752,31 +2031,30 @@ type LocalVolumeSource struct { // Represents storage that is managed by an external CSI volume driver (Beta feature) type CSIPersistentVolumeSource struct { - // Driver is the name of the driver to use for this volume. + // driver is the name of the driver to use for this volume. // Required. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - // VolumeHandle is the unique volume name returned by the CSI volume + // volumeHandle is the unique volume name returned by the CSI volume // plugin’s CreateVolume to refer to the volume on all subsequent calls. // Required. VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"` - // Optional: The value to pass to ControllerPublishVolumeRequest. + // readOnly value to pass to ControllerPublishVolumeRequest. // Defaults to false (read/write). // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. + // fsType to mount. Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` - // Attributes of the volume to publish. + // volumeAttributes of the volume to publish. // +optional VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"` - // ControllerPublishSecretRef is a reference to the secret object containing + // controllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. // This field is optional, and may be empty if no secret is required. If the @@ -1784,7 +2062,7 @@ type CSIPersistentVolumeSource struct { // +optional ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` - // NodeStageSecretRef is a reference to the secret object containing sensitive + // nodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. // This field is optional, and may be empty if no secret is required. If the @@ -1792,7 +2070,7 @@ type CSIPersistentVolumeSource struct { // +optional NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` - // NodePublishSecretRef is a reference to the secret object containing + // nodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. // This field is optional, and may be empty if no secret is required. 
If the @@ -1800,39 +2078,46 @@ type CSIPersistentVolumeSource struct { // +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` - // ControllerExpandSecretRef is a reference to the secret object containing + // controllerExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerExpandVolume call. - // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` + + // nodeExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodeExpandVolume call. + // This field is optional, may be omitted if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } // Represents a source location of a volume to mount, managed by an external CSI driver type CSIVolumeSource struct { - // Driver is the name of the CSI driver that handles this volume. + // driver is the name of the CSI driver that handles this volume. // Consult with your admin for the correct name as registered in the cluster. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - // Specifies a read-only configuration for the volume. + // readOnly specifies a read-only configuration for the volume. // Defaults to false (read/write). // +optional ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` - // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // fsType to mount. Ex. "ext4", "xfs", "ntfs". // If not provided, the empty value is passed to the associated CSI driver // which will determine the default filesystem to apply. // +optional FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // VolumeAttributes stores driver-specific properties that are passed to the CSI + // volumeAttributes stores driver-specific properties that are passed to the CSI // driver. Consult your driver's documentation for supported values. // +optional VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"` - // NodePublishSecretRef is a reference to the secret object containing + // nodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. // This field is optional, and may be empty if no secret is required. If the @@ -1921,6 +2206,26 @@ type VolumeMount struct { // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + // RecursiveReadOnly specifies whether read-only mounts should be handled + // recursively. + // + // If ReadOnly is false, this field has no meaning and must be unspecified. + // + // If ReadOnly is true, and this field is set to Disabled, the mount is not made + // recursively read-only. 
If this field is set to IfPossible, the mount is made + // recursively read-only, if it is supported by the container runtime. If this + // field is set to Enabled, the mount is made recursively read-only if it is + // supported by the container runtime, otherwise the pod will not be started and + // an error will be generated to indicate the reason. + // + // If this field is set to IfPossible or Enabled, MountPropagation must be set to + // None (or be unspecified, which defaults to None). + // + // If this field is not specified, it is treated as an equivalent of Disabled. + // + // +featureGate=RecursiveReadOnlyMounts + // +optional + RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,7,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"` // Path within the container at which the volume should be mounted. Must // not contain ':'. MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` @@ -1932,6 +2237,8 @@ type VolumeMount struct { // to container and the other way around. // When not set, MountPropagationNone is used. // This field is beta in 1.10. + // When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + // (which defaults to None). // +optional MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` // Expanded path within the volume from which the container's volume should be mounted. @@ -1968,6 +2275,18 @@ const ( MountPropagationBidirectional MountPropagationMode = "Bidirectional" ) +// RecursiveReadOnlyMode describes recursive-readonly mode. +type RecursiveReadOnlyMode string + +const ( + // RecursiveReadOnlyDisabled disables recursive-readonly mode. + RecursiveReadOnlyDisabled RecursiveReadOnlyMode = "Disabled" + // RecursiveReadOnlyIfPossible enables recursive-readonly mode if possible. + RecursiveReadOnlyIfPossible RecursiveReadOnlyMode = "IfPossible" + // RecursiveReadOnlyEnabled enables recursive-readonly mode, or raise an error. + RecursiveReadOnlyEnabled RecursiveReadOnlyMode = "Enabled" +) + // volumeDevice describes a mapping of a raw block device within a container. type VolumeDevice struct { // name must match the name of a persistentVolumeClaim in the pod @@ -2105,7 +2424,8 @@ type SecretEnvSource struct { // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The header field value Value string `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -2130,6 +2450,7 @@ type HTTPGetAction struct { Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` // Custom headers to set in the request. HTTP allows repeated headers. // +optional + // +listType=atomic HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` } @@ -2176,9 +2497,16 @@ type ExecAction struct { // a shell, you need to explicitly call out to that shell. // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. // +optional + // +listType=atomic Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` } +// SleepAction describes a "sleep" action. +type SleepAction struct { + // Seconds is the number of seconds to sleep. 
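// Illustrative usage sketch, not part of the vendored diff: a read-only VolumeMount
// that asks for recursive read-only handling where the container runtime supports
// it. Per the field comments above, RecursiveReadOnly is only meaningful when
// ReadOnly is true, and MountPropagation must stay unset (defaulting to None).
// Requires the RecursiveReadOnlyMounts feature gate; names are hypothetical.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	rro := corev1.RecursiveReadOnlyIfPossible
	m := corev1.VolumeMount{
		Name:              "data", // hypothetical volume name
		MountPath:         "/var/data",
		ReadOnly:          true,
		RecursiveReadOnly: &rro,
		// MountPropagation deliberately left unset, which defaults to None.
	}
	fmt.Println(m.Name, *m.RecursiveReadOnly)
}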
+ Seconds int64 `json:"seconds" protobuf:"bytes,1,opt,name=seconds"` +} + // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { @@ -2232,6 +2560,33 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) +// ResourceResizeRestartPolicy specifies how to handle container resource resize. +type ResourceResizeRestartPolicy string + +// These are the valid resource resize restart policy values: +const ( + // 'NotRequired' means Kubernetes will try to resize the container + // without restarting it, if possible. Kubernetes may however choose to + // restart the container if it is unable to actuate resize without a + // restart. For e.g. the runtime doesn't support restart-free resizing. + NotRequired ResourceResizeRestartPolicy = "NotRequired" + // 'RestartContainer' means Kubernetes will resize the container in-place + // by stopping and starting the container when new resources are applied. + // This is needed for legacy applications. For e.g. java apps using the + // -xmxN flag which are unable to use resized memory without restarting. + RestartContainer ResourceResizeRestartPolicy = "RestartContainer" +) + +// ContainerResizePolicy represents resource resize policy for the container. +type ContainerResizePolicy struct { + // Name of the resource to which this resource resize policy applies. + // Supported values: cpu, memory. + ResourceName ResourceName `json:"resourceName" protobuf:"bytes,1,opt,name=resourceName,casttype=ResourceName"` + // Restart policy to apply when specified resource is resized. + // If not specified, it defaults to NotRequired. + RestartPolicy ResourceResizeRestartPolicy `json:"restartPolicy" protobuf:"bytes,2,opt,name=restartPolicy,casttype=ResourceResizeRestartPolicy"` +} + // PreemptionPolicy describes a policy for if/when to preempt a pod. // +enum type PreemptionPolicy string @@ -2264,9 +2619,11 @@ type Capability string type Capabilities struct { // Added capabilities // +optional + // +listType=atomic Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` // Removed capabilities // +optional + // +listType=atomic Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` } @@ -2278,10 +2635,59 @@ type ResourceRequirements struct { Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. + // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` + // Claims lists the names of resources, defined in spec.resourceClaims, + // that are used by this container. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. It can only be set for containers. 
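// Illustrative usage sketch, not part of the vendored diff: a container combining
// the new SleepAction preStop hook (PodLifecycleSleepAction feature gate) with an
// in-place resize policy (InPlacePodVerticalScaling feature gate). CPU may be
// resized without a restart, while a memory resize restarts the container.
// Container name and image are hypothetical.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:  "web",
		Image: "example.com/web:latest",
		ResizePolicy: []corev1.ContainerResizePolicy{
			{ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired},
			{ResourceName: corev1.ResourceMemory, RestartPolicy: corev1.RestartContainer},
		},
		Lifecycle: &corev1.Lifecycle{
			PreStop: &corev1.LifecycleHandler{
				// Give connections 10 seconds to drain before termination.
				Sleep: &corev1.SleepAction{Seconds: 10},
			},
		},
	}
	fmt.Println(c.Name, len(c.ResizePolicy))
}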
+ // + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` +} + +// VolumeResourceRequirements describes the storage resource requirements for a volume. +type VolumeResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. Requests cannot exceed Limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` + + // Claims got added by accident when volumes shared the ResourceRequirements struct + // with containers. Stripping the field got added in 1.27 and was backported to 1.26. + // Starting with Kubernetes 1.28, this field is not part of the volume API anymore. + // + // Future extensions must not use "claims" or field number 3. + // Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` +} + +// ResourceClaim references one entry in PodSpec.ResourceClaims. +type ResourceClaim struct { + // Name must match the name of one entry in pod.spec.resourceClaims of + // the Pod where this field is used. It makes that resource available + // inside a container. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. + // + // +optional + Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"` } const ( @@ -2295,14 +2701,14 @@ type Container struct { // Each container in a pod must have a unique name (DNS_LABEL). // Cannot be updated. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Docker image name. + // Container image name. // More info: https://kubernetes.io/docs/concepts/containers/images // This field is optional to allow higher level config management to default or override // container images in workload controllers like Deployments and StatefulSets. // +optional Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. + // The container image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -2310,9 +2716,10 @@ type Container struct { // of whether the variable exists or not. Cannot be updated. 
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. + // The container image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -2320,6 +2727,7 @@ type Container struct { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` // Container's working directory. // If not specified, the container runtime's default will be used, which @@ -2327,12 +2735,12 @@ type Container struct { // Cannot be updated. // +optional WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here + // List of ports to expose from the container. Not specifying a port here // DOES NOT prevent that port from being exposed. Any port which is // listening on the default "0.0.0.0" address inside a container will be // accessible from the network. + // Modifying this array with strategic merge patch may corrupt the data. + // For more information See https://github.com/kubernetes/kubernetes/issues/108255. // Cannot be updated. // +optional // +patchMergeKey=containerPort @@ -2348,27 +2756,57 @@ type Container struct { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` // List of environment variables to set in the container. // Cannot be updated. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` // Compute Resources required by this container. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` + // RestartPolicy defines the restart behavior of individual containers in a pod. + // This field may only be set for init containers, and the only allowed value is "Always". + // For non-init containers or when this field is not specified, + // the restart behavior is defined by the Pod's restart policy and the container type. 
+ // Setting the RestartPolicy as "Always" for the init container will have the following effect: + // this init container will be continually restarted on + // exit until all regular containers have terminated. Once all regular + // containers have completed, all init containers with restartPolicy "Always" + // will be shut down. This lifecycle differs from normal init containers and + // is often referred to as a "sidecar" container. Although this init + // container still starts in the init container sequence, it does not wait + // for the container to complete before proceeding to the next init + // container. Instead, the next init container starts immediately after this + // init container is started, or after any startupProbe has successfully + // completed. + // +featureGate=SidecarContainers + // +optional + RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. // +patchMergeKey=devicePath // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Periodic probe of container liveness. @@ -2464,8 +2902,6 @@ type ProbeHandler struct { TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` // GRPC specifies an action involving a GRPC port. - // This is an alpha field and requires enabling GRPCContainerProbe feature gate. - // +featureGate=GRPCContainerProbe // +optional GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` } @@ -2484,6 +2920,10 @@ type LifecycleHandler struct { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` + // Sleep represents the duration that the container should sleep before being terminated. + // +featureGate=PodLifecycleSleepAction + // +optional + Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"` } // Lifecycle describes actions that the management system should take in response to container lifecycle @@ -2557,7 +2997,7 @@ type ContainerStateTerminated struct { // Time at which the container last terminated // +optional FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` - // Container's ID in the format 'docker://' + // Container's ID in the format '://' // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` } @@ -2579,33 +3019,161 @@ type ContainerState struct { // ContainerStatus contains details for the current status of this container. type ContainerStatus struct { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Name is a DNS_LABEL representing the unique name of the container. + // Each container in a pod must have a unique name across all container types. // Cannot be updated. 
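// Illustrative usage sketch, not part of the vendored diff: a "sidecar" declared as
// an init container with restartPolicy Always, as described above (SidecarContainers
// feature gate). The sidecar keeps running alongside the regular containers and is
// shut down only after they have terminated. Names and images are hypothetical.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	always := corev1.ContainerRestartPolicyAlways
	spec := corev1.PodSpec{
		InitContainers: []corev1.Container{{
			Name:          "log-shipper",
			Image:         "example.com/log-shipper:latest",
			RestartPolicy: &always, // marks this init container as a sidecar
		}},
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "example.com/app:latest",
		}},
	}
	fmt.Println(len(spec.InitContainers), len(spec.Containers))
}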
Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Details about the container's current condition. + // State holds details about the container's current condition. // +optional State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` - // Details about the container's last termination condition. + // LastTerminationState holds the last termination state of the container to + // help debug container crashes and restarts. This field is not + // populated if the container is still running and RestartCount is 0. // +optional LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` - // Specifies whether the container has passed its readiness probe. + // Ready specifies whether the container is currently passing its readiness check. + // The value will change as readiness probes keep executing. If no readiness + // probes are specified, this field defaults to true once the container is + // fully started (see Started field). + // + // The value is typically used to determine whether a container is ready to + // accept traffic. Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted. + // RestartCount holds the number of times the container has been restarted. + // Kubelet makes an effort to always increment the value, but there + // are cases when the state may be lost due to node restarts and then the value + // may be reset to 0. The value is never negative. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` - // The image the container is running. + // Image is the name of container image that the container is running. + // The container image may not match the image used in the PodSpec, + // as it may have been resolved by the runtime. // More info: https://kubernetes.io/docs/concepts/containers/images. Image string `json:"image" protobuf:"bytes,6,opt,name=image"` - // ImageID of the container's image. + // ImageID is the image ID of the container's image. The image ID may not + // match the image ID of the image used in the PodSpec, as it may have been + // resolved by the runtime. ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` - // Container's ID in the format 'docker://'. + // ContainerID is the ID of the container in the format '://'. + // Where type is a container runtime identifier, returned from Version call of CRI API + // (for example "containerd"). // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. + // Started indicates whether the container has finished its postStart lifecycle hook + // and passed its startup probe. + // Initialized as false, becomes true after startupProbe is considered + // successful. Resets to false when the container is restarted, or if kubelet + // loses state temporarily. In both cases, startup probes will run again. + // Is always true when no startupProbe is defined and container is running and + // has passed the postStart lifecycle hook. The null value must be treated the + // same as false. 
// +optional Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` + // AllocatedResources represents the compute resources allocated for this container by the + // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission + // and after successfully admitting desired pod resize. + // +featureGate=InPlacePodVerticalScaling + // +optional + AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` + // Resources represents the compute resource requests and limits that have been successfully + // enacted on the running container after it has been started or has been successfully resized. + // +featureGate=InPlacePodVerticalScaling + // +optional + Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,11,opt,name=resources"` + // Status of volume mounts. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath + // +featureGate=RecursiveReadOnlyMounts + VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"` + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"` + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. + // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"` +} + +type ResourceStatus struct { + // Name of the resource. Must be unique within the pod and match one of the resources from the pod spec. + // +required + Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"` + // List of unique Resources health. Each element in the list contains an unique resource ID and resource health. + // At a minimum, ResourceID must uniquely identify the Resource + // allocated to the Pod on the Node for the lifetime of a Pod. + // See ResourceID type for it's definition. + // +listType=map + // +listMapKey=resourceID + Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` +} + +type ResourceHealthStatus string + +const ( + ResourceHealthStatusHealthy ResourceHealthStatus = "Healthy" + ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy" + ResourceHealthStatusUnknown ResourceHealthStatus = "Unknown" +) + +// ResourceID is calculated based on the source of this resource health information. +// For DevicePlugin: +// +// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73 +// +// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node. +// For DRA: +// +// dra://: such a device can be looked up in the information published by that DRA driver to learn more about it. 
It is designed to be globally unique in a cluster. +type ResourceID string + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP. +type ResourceHealth struct { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"` + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. + Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"` +} + +// ContainerUser represents user identity information +type ContainerUser struct { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. + // +optional + Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"` + + // Windows holds user identity information initially attached to the first process of the containers in Windows + // This is just reserved for future use. + // Windows *WindowsContainerUser +} + +// LinuxContainerUser represents user identity information in Linux containers +type LinuxContainerUser struct { + // UID is the primary uid initially attached to the first process in the container + UID int64 `json:"uid" protobuf:"varint,1,name=uid"` + // GID is the primary gid initially attached to the first process in the container + GID int64 `json:"gid" protobuf:"varint,2,name=gid"` + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2647,6 +3215,12 @@ const ( PodReady PodConditionType = "Ready" // PodScheduled represents status of the scheduling process for this pod. PodScheduled PodConditionType = "PodScheduled" + // DisruptionTarget indicates the pod is about to be terminated due to a + // disruption (such as preemption, eviction API or garbage-collection). + DisruptionTarget PodConditionType = "DisruptionTarget" + // PodReadyToStartContainers pod sandbox is successfully configured and + // the pod is ready to launch containers. + PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -2654,6 +3228,22 @@ const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + + // PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler + // skips scheduling the pod because one or more scheduling gates are still present. 
+ PodReasonSchedulingGated = "SchedulingGated" + + // PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens + // during scheduling, for example due to nodeAffinity parsing errors. + PodReasonSchedulerError = "SchedulerError" + + // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination + // is initiated by kubelet + PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -2679,6 +3269,37 @@ type PodCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } +// PodResizeStatus shows status of desired resize of a pod's containers. +type PodResizeStatus string + +const ( + // Pod resources resize has been requested and will be evaluated by node. + PodResizeStatusProposed PodResizeStatus = "Proposed" + // Pod resources resize has been accepted by node and is being actuated. + PodResizeStatusInProgress PodResizeStatus = "InProgress" + // Node cannot resize the pod at this time and will keep retrying. + PodResizeStatusDeferred PodResizeStatus = "Deferred" + // Requested pod resize is not feasible and will not be re-evaluated. + PodResizeStatusInfeasible PodResizeStatus = "Infeasible" +) + +// VolumeMountStatus shows status of volume mounts. +type VolumeMountStatus struct { + // Name corresponds to the name of the original VolumeMount. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // MountPath corresponds to the original VolumeMount. + MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"` + // ReadOnly corresponds to the original VolumeMount. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). + // An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, + // depending on the mount result. + // +featureGate=RecursiveReadOnlyMounts + // +optional + RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,4,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"` +} + // RestartPolicy describes how the container should be restarted. // Only one of the following restart policies may be specified. // If none of the following policies is specified, the default one @@ -2692,6 +3313,14 @@ const ( RestartPolicyNever RestartPolicy = "Never" ) +// ContainerRestartPolicy is the restart policy for a single container. +// This may only be set for init containers and only allowed value is "Always". +type ContainerRestartPolicy string + +const ( + ContainerRestartPolicyAlways ContainerRestartPolicy = "Always" +) + // DNSPolicy defines how a pod's DNS will be configured. // +enum type DNSPolicy string @@ -2728,7 +3357,8 @@ const ( // by the node selector terms. // +structType=atomic type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. + // Required. A list of node selector terms. The terms are ORed. 
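// Illustrative usage sketch, not part of the vendored diff: reading the new
// DisruptionTarget pod condition defined above. disruptionReason returns the reason
// recorded on that condition (for example "PreemptionByScheduler"), or "" when the
// pod is not a disruption target.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func disruptionReason(status corev1.PodStatus) string {
	for _, cond := range status.Conditions {
		if cond.Type == corev1.DisruptionTarget && cond.Status == corev1.ConditionTrue {
			return cond.Reason
		}
	}
	return ""
}

func main() {
	status := corev1.PodStatus{Conditions: []corev1.PodCondition{{
		Type:   corev1.DisruptionTarget,
		Status: corev1.ConditionTrue,
		Reason: corev1.PodReasonPreemptionByScheduler,
	}}}
	fmt.Println(disruptionReason(status))
}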
+ // +listType=atomic NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` } @@ -2739,9 +3369,11 @@ type NodeSelector struct { type NodeSelectorTerm struct { // A list of node selector requirements by node's labels. // +optional + // +listType=atomic MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` // A list of node selector requirements by node's fields. // +optional + // +listType=atomic MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"` } @@ -2759,6 +3391,7 @@ type NodeSelectorRequirement struct { // array must have a single element, which will be interpreted as an integer. // This array is replaced during a strategic merge patch. // +optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } @@ -2787,6 +3420,7 @@ type TopologySelectorTerm struct { // A list of topology selector requirements by labels. // +optional + // +listType=atomic MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"` } @@ -2797,6 +3431,7 @@ type TopologySelectorLabelRequirement struct { Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // An array of string values. One value must match the label to be selected. // Each entry in Values is ORed. + // +listType=atomic Values []string `json:"values" protobuf:"bytes,2,rep,name=values"` } @@ -2834,6 +3469,7 @@ type PodAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional + // +listType=atomic RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` // The scheduler will prefer to schedule pods to nodes that satisfy // the affinity expressions specified by this field, but it may choose @@ -2845,6 +3481,7 @@ type PodAffinity struct { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` } @@ -2869,6 +3506,7 @@ type PodAntiAffinity struct { // When there are multiple elements, the lists of nodes corresponding to each // podAffinityTerm are intersected, i.e. all terms must be satisfied. // +optional + // +listType=atomic RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` // The scheduler will prefer to schedule pods to nodes that satisfy // the anti-affinity expressions specified by this field, but it may choose @@ -2880,6 +3518,7 @@ type PodAntiAffinity struct { // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the // node(s) with the highest sum are the most preferred. 
// +optional + // +listType=atomic PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` } @@ -2900,13 +3539,15 @@ type WeightedPodAffinityTerm struct { // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. + // If it's null, this PodAffinityTerm matches with no Pods. // +optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies a static list of namespace names that the term applies to. // The term is applied to the union of the namespaces listed in this field // and the ones selected by namespaceSelector. - // null or empty namespaces list and null namespaceSelector means "this pod's namespace" + // null or empty namespaces list and null namespaceSelector means "this pod's namespace". // +optional + // +listType=atomic Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching // the labelSelector in the specified namespaces, where co-located is defined as running on a node @@ -2919,9 +3560,34 @@ type PodAffinityTerm struct { // and the ones listed in the namespaces field. // null selector and null or empty namespaces list means "this pod's namespace". // An empty selector ({}) matches all namespaces. - // This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"` + // MatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both matchLabelKeys and labelSelector. + // Also, matchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // + // +listType=atomic + // +optional + MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` + // MismatchLabelKeys is a set of pod label keys to select which pods will + // be taken into consideration. The keys are used to lookup values from the + // incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + // to select the group of existing pods which pods will be taken into consideration + // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + // pod labels will be ignored. The default value is empty. + // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ // + // +listType=atomic + // +optional + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` } // Node affinity is a group of node affinity scheduling rules. @@ -2952,6 +3618,7 @@ type NodeAffinity struct { // "weight" to the sum if the node matches the corresponding matchExpressions; the // node(s) with the highest sum are the most preferred. // +optional + // +listType=atomic PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` } @@ -3057,6 +3724,8 @@ type PodSpec struct { // +optional // +patchMergeKey=name // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` // List of initialization containers belonging to the pod. // Init containers are executed in order prior to containers being started. If any @@ -3073,6 +3742,8 @@ type PodSpec struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. @@ -3080,18 +3751,21 @@ type PodSpec struct { // Cannot be updated. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing // pod to perform user-initiated actions such as debugging. This list cannot be specified when // creating a pod, and it cannot be modified by updating the pod spec. In order to add an // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. + // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. // Default to Always. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy // +optional @@ -3130,7 +3804,7 @@ type PodSpec struct { // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. // Deprecated: Use serviceAccountName instead. 
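// Illustrative usage sketch, not part of the vendored diff: required pod
// anti-affinity that uses matchLabelKeys (MatchLabelKeysInPodAffinity feature gate,
// beta) so that only pods from the same rollout, identified here by the
// "pod-template-hash" key, are considered when spreading replicas across hostnames.
// The "app" label value is hypothetical.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	affinity := corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "web"}, // hypothetical label
				},
				TopologyKey:    "kubernetes.io/hostname",
				MatchLabelKeys: []string{"pod-template-hash"},
			}},
		},
	}
	fmt.Println(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].TopologyKey)
}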
// +k8s:conversion-gen=false // +optional @@ -3139,9 +3813,11 @@ type PodSpec struct { // +optional AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename // +optional NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. @@ -3173,12 +3849,13 @@ type PodSpec struct { // +optional SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. + // If specified, these secrets will be passed to individual puller implementations for them to use. // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` // Specifies the hostname of the Pod // If not specified, the pod's hostname will be set to a system-defined value. @@ -3197,12 +3874,15 @@ type PodSpec struct { SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` // If specified, the pod's tolerations. // +optional + // +listType=atomic Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. + // file if specified. // +optional // +patchMergeKey=ip // +patchStrategy=merge + // +listType=map + // +listMapKey=ip HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"` // If specified, indicates the pod's priority. "system-node-critical" and // "system-cluster-critical" are two special keywords which indicate the @@ -3229,13 +3909,13 @@ type PodSpec struct { // all conditions specified in the readiness gates have status equal to "True" // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates // +optional + // +listType=atomic ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - // This is a beta feature as of Kubernetes v1.14. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` // EnableServiceLinks indicates whether information about services should be injected into pod's @@ -3246,7 +3926,6 @@ type PodSpec struct { // PreemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. @@ -3256,7 +3935,6 @@ type PodSpec struct { // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` // TopologySpreadConstraints describes how a group of pods ought to spread across topology @@ -3285,6 +3963,8 @@ type PodSpec struct { // If the OS field is set to windows, following fields must be unset: // - spec.hostPID // - spec.hostIPC + // - spec.hostUsers + // - spec.securityContext.appArmorProfile // - spec.securityContext.seLinuxOptions // - spec.securityContext.seccompProfile // - spec.securityContext.fsGroup @@ -3294,6 +3974,8 @@ type PodSpec struct { // - spec.securityContext.runAsUser // - spec.securityContext.runAsGroup // - spec.securityContext.supplementalGroups + // - spec.securityContext.supplementalGroupsPolicy + // - spec.containers[*].securityContext.appArmorProfile // - spec.containers[*].securityContext.seLinuxOptions // - spec.containers[*].securityContext.seccompProfile // - spec.containers[*].securityContext.capabilities @@ -3304,8 +3986,109 @@ type PodSpec struct { // - spec.containers[*].securityContext.runAsUser // - spec.containers[*].securityContext.runAsGroup // +optional - // This is an alpha field and requires the IdentifyPodOS feature OS *PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"` + + // Use the host's user namespace. + // Optional: Default to true. + // If set to true or not present, the pod will be run in the host user namespace, useful + // for when the pod needs a feature only available to the host user namespace, such as + // loading a kernel module with CAP_SYS_MODULE. + // When set to false, a new userns is created for the pod. Setting false is useful for + // mitigating container breakout vulnerabilities even allowing users to run their + // containers as root without actually having root privileges on the host. + // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature. 
+ // +k8s:conversion-gen=false + // +optional + HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"` + + // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. + // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the + // scheduler will not attempt to schedule the pod. + // + // SchedulingGates can only be set at pod creation time, and be removed only afterwards. + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + SchedulingGates []PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"` + // ResourceClaims defines which ResourceClaims must be allocated + // and reserved before the Pod is allowed to start. The resources + // will be made available to those containers which consume them + // by name. + // + // This is an alpha field and requires enabling the + // DynamicResourceAllocation feature gate. + // + // This field is immutable. + // + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` +} + +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// +// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. +// Containers that need access to the ResourceClaim reference it with this name. +type PodResourceClaim struct { + // Name uniquely identifies this resource claim inside the pod. + // This must be a DNS_LABEL. + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Source is tombstoned since Kubernetes 1.31 where it got replaced with + // the inlined fields below. + // + // Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` + + // ResourceClaimName is the name of a ResourceClaim object in the same + // namespace as this pod. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"` + + // ResourceClaimTemplateName is the name of a ResourceClaimTemplate + // object in the same namespace as this pod. + // + // The template will be used to create a new ResourceClaim, which will + // be bound to this pod. When this pod is deleted, the ResourceClaim + // will also be deleted. The pod name and resource name, along with a + // generated component, will be used to form a unique name for the + // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + // + // This field is immutable and no changes will be made to the + // corresponding ResourceClaim by the control plane after creating the + // ResourceClaim. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"` +} + +// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim +// which references a ResourceClaimTemplate. It stores the generated name for +// the corresponding ResourceClaim. 
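// Illustrative usage sketch, not part of the vendored diff: a PodSpec that is held
// back by a scheduling gate and declares a ResourceClaim via a ResourceClaimTemplate
// (DynamicResourceAllocation feature gate). The container consumes the claim by name
// through resources.claims. Gate, claim, and template names are hypothetical.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	templateName := "gpu-claim-template"
	spec := corev1.PodSpec{
		SchedulingGates: []corev1.PodSchedulingGate{{Name: "example.com/quota-check"}},
		ResourceClaims: []corev1.PodResourceClaim{{
			Name:                      "gpu",
			ResourceClaimTemplateName: &templateName,
		}},
		Containers: []corev1.Container{{
			Name:  "trainer",
			Image: "example.com/trainer:latest",
			Resources: corev1.ResourceRequirements{
				Claims: []corev1.ResourceClaim{{Name: "gpu"}},
			},
		}},
	}
	fmt.Println(spec.SchedulingGates[0].Name, spec.ResourceClaims[0].Name)
}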
+type PodResourceClaimStatus struct { + // Name uniquely identifies this resource claim inside the pod. + // This must match the name of an entry in pod.spec.resourceClaims, + // which implies that the string must be a DNS_LABEL. + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // ResourceClaimName is the name of the ResourceClaim that was + // generated for the Pod in the namespace of the Pod. If this is + // unset, then generating a ResourceClaim was not necessary. The + // pod.spec.resourceClaims entry can be ignored in this case. + // + // +optional + ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"` } // OSName is the set of OS'es that can be used in OS. @@ -3326,6 +4109,13 @@ type PodOS struct { Name OSName `json:"name" protobuf:"bytes,1,opt,name=name"` } +// PodSchedulingGate is associated to a Pod to guard its scheduling. +type PodSchedulingGate struct { + // Name of the scheduling gate. + // Each scheduling gate must have a unique name field. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + // +enum type UnsatisfiableConstraintAction string @@ -3338,20 +4128,34 @@ const ( ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" ) +// NodeInclusionPolicy defines the type of node inclusion policy +// +enum +type NodeInclusionPolicy string + +const ( + // NodeInclusionPolicyIgnore means ignore this scheduling directive when calculating pod topology spread skew. + NodeInclusionPolicyIgnore NodeInclusionPolicy = "Ignore" + // NodeInclusionPolicyHonor means use this scheduling directive when calculating pod topology spread skew. + NodeInclusionPolicyHonor NodeInclusionPolicy = "Honor" +) + // TopologySpreadConstraint specifies how to spread matching pods among the given topology. type TopologySpreadConstraint struct { // MaxSkew describes the degree to which pods may be unevenly distributed. // When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference // between the number of matching pods in the target topology and the global minimum. + // The global minimum is the minimum number of matching pods in an eligible domain + // or zero if the number of eligible domains is less than MinDomains. // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 1/1/0: + // labelSelector spread as 2/2/1: + // In this case, the global minimum is 1. // +-------+-------+-------+ // | zone1 | zone2 | zone3 | // +-------+-------+-------+ - // | P | P | | + // | P P | P P | P | // +-------+-------+-------+ - // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; - // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + // scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) // violate MaxSkew(1). // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. // When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence @@ -3362,6 +4166,11 @@ type TopologySpreadConstraint struct { // and identical values are considered to be in the same topology. // We consider each as a "bucket", and try to put balanced number // of pods into each bucket. + // We define a domain as a particular instance of a topology. + // Also, we define an eligible domain as a domain whose nodes meet the requirements of + // nodeAffinityPolicy and nodeTaintsPolicy. + // e.g. 
If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. // It's a required field. TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"` // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy @@ -3391,6 +4200,62 @@ type TopologySpreadConstraint struct { // in their corresponding topology domain. // +optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"` + // MinDomains indicates a minimum number of eligible domains. + // When the number of eligible domains with matching topology keys is less than minDomains, + // Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + // And when the number of eligible domains with matching topology keys equals or greater than minDomains, + // this value has no effect on scheduling. + // As a result, when the number of eligible domains is less than minDomains, + // scheduler won't schedule more than maxSkew Pods to those domains. + // If value is nil, the constraint behaves as if MinDomains is equal to 1. + // Valid values are integers greater than 0. + // When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + // + // For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + // labelSelector spread as 2/2/2: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P | P P | P P | + // +-------+-------+-------+ + // The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + // In this situation, new pod with the same labelSelector cannot be scheduled, + // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + // it will violate MaxSkew. + // +optional + MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"` + // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + // when calculating pod topology spread skew. Options are: + // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + // + // If this value is nil, the behavior is equivalent to the Honor policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + // +optional + NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"` + // NodeTaintsPolicy indicates how we will treat node taints when calculating + // pod topology spread skew. Options are: + // - Honor: nodes without taints, along with tainted nodes for which the incoming pod + // has a toleration, are included. + // - Ignore: node taints are ignored. All nodes are included. + // + // If this value is nil, the behavior is equivalent to the Ignore policy. + // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + // +optional + NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"` + // MatchLabelKeys is a set of pod label keys to select the pods over which + // spreading will be calculated. 
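The minDomains, nodeAffinityPolicy, nodeTaintsPolicy, and matchLabelKeys additions documented in this hunk can be combined roughly as follows (an illustrative sketch against the v0.31.x types, not part of the vendored file):

// Illustrative only: a TopologySpreadConstraint exercising the new fields.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	minDomains := int32(3)
	honor := corev1.NodeInclusionPolicyHonor
	ignore := corev1.NodeInclusionPolicyIgnore
	tsc := corev1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.DoNotSchedule, // required whenever MinDomains is set
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		MinDomains:         &minDomains,
		NodeAffinityPolicy: &honor,  // only nodes matching nodeAffinity/nodeSelector count toward skew
		NodeTaintsPolicy:   &ignore, // node taints are ignored when computing skew
		MatchLabelKeys:     []string{"pod-template-hash"},
	}
	fmt.Printf("%+v\n", tsc)
}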
The keys are used to lookup values from the + // incoming pod labels, those key-value labels are ANDed with labelSelector + // to select the group of existing pods over which spreading will be calculated + // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + // MatchLabelKeys cannot be set when LabelSelector isn't set. + // Keys that don't exist in the incoming pod labels will + // be ignored. A null or empty list means only match against labelSelector. + // + // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + // +listType=atomic + // +optional + MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,8,opt,name=matchLabelKeys"` } const ( @@ -3402,8 +4267,10 @@ const ( // pod's hosts file. type HostAlias struct { // IP address of the host file entry. - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // Hostnames for the above IP address. + // +listType=atomic Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"` } @@ -3424,6 +4291,23 @@ const ( FSGroupChangeAlways PodFSGroupChangePolicy = "Always" ) +// SupplementalGroupsPolicy defines how supplemental groups +// of the first container processes are calculated. +// +enum +type SupplementalGroupsPolicy string + +const ( + // SupplementalGroupsPolicyMerge means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // merged with the primary user's groups as defined in the container image + // (in /etc/group). + SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge" + // SupplementalGroupsPolicyStrict means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // used instead of any groups defined in the container image. + SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict" +) + // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -3466,12 +4350,27 @@ type PodSecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. + // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. // Note that this field cannot be set when spec.os.name is windows. 
// +optional + // +listType=atomic SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` + // Defines how supplemental groups of the first container processes are calculated. + // Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + // and the container runtime must implement support for this feature. + // Note that this field cannot be set when spec.os.name is windows. + // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34 + // +featureGate=SupplementalGroupsPolicy + // +optional + SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"` // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -3488,6 +4387,7 @@ type PodSecurityContext struct { // sysctls (by the container runtime) might fail to launch. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume // before being exposed inside Pod. This field will only apply to @@ -3502,6 +4402,10 @@ type PodSecurityContext struct { // Note that this field cannot be set when spec.os.name is windows. // +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,10,opt,name=seccompProfile"` + // appArmorProfile is the AppArmor options to use by the containers in this pod. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,11,opt,name=appArmorProfile"` } // SeccompProfile defines a pod/container's seccomp profile settings. @@ -3519,7 +4423,7 @@ type SeccompProfile struct { // localhostProfile indicates a profile defined in a file on the node should be used. // The profile must be preconfigured on the node to work. // Must be a descending path, relative to the kubelet's configured seccomp profile location. - // Must only be set if type is "Localhost". + // Must be set if type is "Localhost". Must NOT be set for any other type. // +optional LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"` } @@ -3538,6 +4442,38 @@ const ( SeccompProfileTypeLocalhost SeccompProfileType = "Localhost" ) +// AppArmorProfile defines a pod or container's AppArmor settings. +// +union +type AppArmorProfile struct { + // type indicates which kind of AppArmor profile will be applied. + // Valid options are: + // Localhost - a profile pre-loaded on the node. + // RuntimeDefault - the container runtime's default profile. + // Unconfined - no AppArmor enforcement. + // +unionDiscriminator + Type AppArmorProfileType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=AppArmorProfileType"` + + // localhostProfile indicates a profile loaded on the node that should be used. + // The profile must be preconfigured on the node to work. + // Must match the loaded name of the profile. + // Must be set if and only if type is "Localhost". 
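A short, illustrative sketch (not part of the vendored file) combining the new supplementalGroupsPolicy and appArmorProfile pod security context fields described above; supplementalGroupsPolicy is alpha at this API revision and additionally needs feature-gate and runtime support:

// Illustrative only: PodSecurityContext with the fields added in this hunk.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	strict := corev1.SupplementalGroupsPolicyStrict
	sc := corev1.PodSecurityContext{
		SupplementalGroups:       []int64{1000, 2000},
		SupplementalGroupsPolicy: &strict, // ignore group memberships baked into the image
		AppArmorProfile: &corev1.AppArmorProfile{
			Type: corev1.AppArmorProfileTypeRuntimeDefault,
		},
	}
	fmt.Printf("%+v\n", sc)
}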
+ // +optional + LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"` +} + +// +enum +type AppArmorProfileType string + +const ( + // AppArmorProfileTypeUnconfined indicates that no AppArmor profile should be enforced. + AppArmorProfileTypeUnconfined AppArmorProfileType = "Unconfined" + // AppArmorProfileTypeRuntimeDefault indicates that the container runtime's default AppArmor + // profile should be used. + AppArmorProfileTypeRuntimeDefault AppArmorProfileType = "RuntimeDefault" + // AppArmorProfileTypeLocalhost indicates that a profile pre-loaded on the node should be used. + AppArmorProfileTypeLocalhost AppArmorProfileType = "Localhost" +) + // PodQOSClass defines the supported qos classes of Pods. // +enum type PodQOSClass string @@ -3558,17 +4494,20 @@ type PodDNSConfig struct { // This will be appended to the base nameservers generated from DNSPolicy. // Duplicated nameservers will be removed. // +optional + // +listType=atomic Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` // A list of DNS search domains for host-name lookup. // This will be appended to the base search paths generated from DNSPolicy. // Duplicated search paths will be removed. // +optional + // +listType=atomic Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` // A list of DNS resolver options. // This will be merged with the base options generated from DNSPolicy. // Duplicated entries will be removed. Resolution options given in Options // will override those that appear in the base DNSPolicy. // +optional + // +listType=atomic Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` } @@ -3580,13 +4519,18 @@ type PodDNSConfigOption struct { Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// PodIP represents a single IP address allocated to the pod. type PodIP struct { - // ip is an IP address (IPv4 or IPv6) assigned to the pod - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // IP is the IP address assigned to the pod + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` +} + +// HostIP represents a single IP address allocated to the host. +type HostIP struct { + // IP is the IP address assigned to the host + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // EphemeralContainerCommon is a copy of all fields in Container to be inlined in @@ -3597,11 +4541,11 @@ type EphemeralContainerCommon struct { // Name of the ephemeral container specified as a DNS_LABEL. // This name must be unique among all containers, init containers and ephemeral containers. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Docker image name. + // Container image name. // More info: https://kubernetes.io/docs/concepts/containers/images Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. + // The image's ENTRYPOINT is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -3609,9 +4553,10 @@ type EphemeralContainerCommon struct { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. + // The image's CMD is used if this is not provided. // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will @@ -3619,6 +4564,7 @@ type EphemeralContainerCommon struct { // of whether the variable exists or not. Cannot be updated. // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell // +optional + // +listType=atomic Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` // Container's working directory. // If not specified, the container runtime's default will be used, which @@ -3641,26 +4587,45 @@ type EphemeralContainerCommon struct { // Values defined by an Env with a duplicate key will take precedence. // Cannot be updated. // +optional + // +listType=atomic EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` // List of environment variables to set in the container. // Cannot be updated. // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources // already allocated to the pod. // +optional Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Resources resize policy for the container. + // +featureGate=InPlacePodVerticalScaling + // +optional + // +listType=atomic + ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"` + // Restart policy for the container to manage the restart behavior of each + // container within a pod. + // This may only be set for init containers. You cannot set this field on + // ephemeral containers. + // +featureGate=SidecarContainers + // +optional + RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"` // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. // Cannot be updated. // +optional // +patchMergeKey=mountPath // +patchStrategy=merge + // +listType=map + // +listMapKey=mountPath VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. 
// +patchMergeKey=devicePath + // +patchStrategy=merge + // +listType=map + // +listMapKey=devicePath // +optional VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Probes are not allowed for ephemeral containers. @@ -3740,8 +4705,6 @@ var _ = Container(EphemeralContainerCommon{}) // // To add an ephemeral container, use the ephemeralcontainers subresource of an existing // Pod. Ephemeral containers may not be removed or restarted. -// -// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate. type EphemeralContainer struct { // Ephemeral containers have all of the fields of Container, plus additional fields // specific to ephemeral containers. Fields in common with Container are in the @@ -3788,6 +4751,8 @@ type PodStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` // A human readable message indicating details about why the pod is in this condition. // +optional @@ -3806,10 +4771,23 @@ type PodStatus struct { // +optional NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"` - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet, which means that HostIP will + // not be updated even if a node is assigned to the pod // +optional HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` - // IP address allocated to the pod. Routable at least within the cluster. + + // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must + // match the hostIP field. This list is empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet, which means that HostIPs will + // not be updated even if a node is assigned to this pod. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + // +listType=atomic + HostIPs []HostIP `json:"hostIPs,omitempty" protobuf:"bytes,16,rep,name=hostIPs" patchStrategy:"merge" patchMergeKey:"ip"` + + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. // +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` @@ -3820,6 +4798,8 @@ type PodStatus struct { // +optional // +patchStrategy=merge // +patchMergeKey=ip + // +listType=map + // +listMapKey=ip PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` // RFC 3339 date and time at which the object was acknowledged by the Kubelet. @@ -3831,25 +4811,43 @@ type PodStatus struct { // init container will have ready = true, the most recently started container will have // startTime set. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +listType=atomic InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` - // The list has one entry per container in the manifest. 
Each entry is currently the output - // of `docker inspect`. + // The list has one entry per container in the manifest. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional + // +listType=atomic ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` // Status for any ephemeral containers that have run in this pod. - // This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate. // +optional + // +listType=atomic EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` + + // Status of resources resize desired for pod's containers. + // It is empty if no resources resize is pending. + // Any changes to container resources will automatically set this to "Proposed" + // +featureGate=InPlacePodVerticalScaling + // +optional + Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"` + + // Status of resource claims. + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + // +listType=map + // +listMapKey=name + // +featureGate=DynamicResourceAllocation + // +optional + ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { @@ -3870,6 +4868,7 @@ type PodStatusResult struct { // +genclient // +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Pod is a collection of containers that can run on a host. This resource is created // by clients and scheduled onto hosts. @@ -3895,6 +4894,7 @@ type Pod struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodList is a list of Pods. type PodList struct { @@ -3924,6 +4924,7 @@ type PodTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { @@ -3940,6 +4941,7 @@ type PodTemplate struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { @@ -3985,6 +4987,7 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. 
This takes precedence over a TemplateRef. + // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` @@ -3993,7 +4996,7 @@ type ReplicationControllerSpec struct { // ReplicationControllerStatus represents the current status of a replication // controller. type ReplicationControllerStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` @@ -4017,6 +5020,8 @@ type ReplicationControllerStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -4051,6 +5056,7 @@ type ReplicationControllerCondition struct { // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationController represents the configuration of a replication controller. type ReplicationController struct { @@ -4077,6 +5083,7 @@ type ReplicationController struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { @@ -4145,29 +5152,59 @@ const ( ServiceTypeExternalName ServiceType = "ExternalName" ) -// ServiceInternalTrafficPolicyType describes the type of traffic routing for -// internal traffic +// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they +// receive on the ClusterIP. // +enum -type ServiceInternalTrafficPolicyType string +type ServiceInternalTrafficPolicy string const ( - // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster" + // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. + ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" - // ServiceInternalTrafficPolicyLocal only routes to node-local - // endpoints, otherwise drops the traffic - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local" + // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same + // node as the client pod (dropping the traffic if there are no local endpoints). + ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" ) -// Service External Traffic Policy Type string +// for backwards compat // +enum -type ServiceExternalTrafficPolicyType string +type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy + +// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they +// receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, +// and LoadBalancer IPs. 
+// +enum +type ServiceExternalTrafficPolicy string const ( - // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" - // ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" + // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. + ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" + + // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by + // routing only to endpoints on the same node as the traffic was received on + // (dropping the traffic if there are no local endpoints). + ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" +) + +// for backwards compat +// +enum +type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy + +const ( + ServiceExternalTrafficPolicyTypeLocal = ServiceExternalTrafficPolicyLocal + ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster +) + +// These are valid values for the TrafficDistribution field of a Service. +const ( + // Indicates a preference for routing traffic to endpoints that are + // topologically proximate to the client. The interpretation of "topologically + // proximate" may vary across implementations and could encompass endpoints + // within the same node, rack, zone, or even region. Setting this value gives + // implementations permission to make different tradeoffs, e.g. optimizing for + // proximity rather than equal distribution of load. Users should not set this + // value if such tradeoffs are not acceptable. + ServiceTrafficDistributionPreferClose = "PreferClose" ) // These are the valid conditions of a service. @@ -4175,6 +5212,9 @@ const ( // LoadBalancerPortsError represents the condition of the requested ports // on the cloud load balancer instance. LoadBalancerPortsError = "LoadBalancerPortsError" + // LoadBalancerPortsErrorReason reason in ServiceStatus condition LoadBalancerPortsError + // means the LoadBalancer was not able to be configured correctly. + LoadBalancerPortsErrorReason = "LoadBalancerMixedProtocolNotSupported" ) // ServiceStatus represents the current status of a service. @@ -4197,6 +5237,7 @@ type LoadBalancerStatus struct { // Ingress is a list containing ingress points for the load-balancer. // Traffic intended for the service should be sent to these ingress points. // +optional + // +listType=atomic Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } @@ -4213,6 +5254,15 @@ type LoadBalancerIngress struct { // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. 
+ // +optional + IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"` + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -4230,32 +5280,38 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // IPFamilyUnknown indicates that this IP is unknown protocol + IPFamilyUnknown IPFamily = "" ) -// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service +// IPFamilyPolicy represents the dual-stack-ness requested or required by a Service // +enum -type IPFamilyPolicyType string +type IPFamilyPolicy string const ( // IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily. // The IPFamily assigned is based on the default IPFamily used by the cluster // or as identified by service.spec.ipFamilies field - IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack" + IPFamilyPolicySingleStack IPFamilyPolicy = "SingleStack" // IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when // the cluster is configured for dual-stack. If the cluster is not configured // for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not // set in service.spec.ipFamilies then the service will be assigned the default IPFamily // configured on the cluster - IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack" + IPFamilyPolicyPreferDualStack IPFamilyPolicy = "PreferDualStack" // IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using // IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The // IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If // service.spec.ipFamilies was not provided then it will be assigned according to how they are // configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative // IPFamily will be added by apiserver - IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack" + IPFamilyPolicyRequireDualStack IPFamilyPolicy = "RequireDualStack" ) +// for backwards compat +// +enum +type IPFamilyPolicyType = IPFamilyPolicy + // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. @@ -4347,6 +5403,7 @@ type ServiceSpec struct { // at a node with this IP. A common example is external load-balancers // that are not part of the Kubernetes system. // +optional + // +listType=atomic ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` // Supports "ClientIP" and "None". Used to maintain session affinity. @@ -4357,11 +5414,13 @@ type ServiceSpec struct { // +optional SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. + // Only applies to Service Type: LoadBalancer. // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. + // Deprecated: This field was under-specified and its meaning varies across implementations. 
+ // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` @@ -4370,6 +5429,7 @@ type ServiceSpec struct { // cloud-provider does not support the feature." // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional + // +listType=atomic LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` // externalName is the external reference that discovery mechanisms will @@ -4379,14 +5439,21 @@ type ServiceSpec struct { // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. - // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + // externalTrafficPolicy describes how nodes distribute service traffic they + // receive on one of the Service's "externally-facing" addresses (NodePorts, + // ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + // the service in a way that assumes that external load balancers will take care + // of balancing the service traffic between nodes, and so each node will deliver + // traffic only to the node-local endpoints of the service, without masquerading + // the client source IP. (Traffic mistakenly sent to a node with no endpoints will + // be dropped.) The default value, "Cluster", uses the standard behavior of + // routing to all endpoints evenly (possibly modified by topology and other + // features). Note that traffic sent to an External IP or LoadBalancer IP from + // within the cluster will always get "Cluster" semantics, but clients sending to + // a NodePort from within the cluster may need to take traffic policy into account + // when picking a node. + // +optional + ExternalTrafficPolicy ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` // healthCheckNodePort specifies the healthcheck nodePort for the service. // This only applies when type is set to LoadBalancer and @@ -4397,6 +5464,7 @@ type ServiceSpec struct { // service or not. If this field is specified when creating a Service // which does not need it, creation will fail. This field will be wiped // when updating a Service to no longer need it (e.g. changing type). + // This field cannot be updated once set. // +optional HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` @@ -4416,7 +5484,7 @@ type ServiceSpec struct { SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` // TopologyKeys is tombstoned to show why 16 is reserved protobuf tag. 
- //TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` + // TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. // IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` @@ -4450,7 +5518,7 @@ type ServiceSpec struct { // ipFamilies and clusterIPs fields depend on the value of this field. This // field will be wiped when updating a service to type ExternalName. // +optional - IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"` + IPFamilyPolicy *IPFamilyPolicy `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicy"` // allocateLoadBalancerNodePorts defines if NodePorts will be automatically // allocated for services with type LoadBalancer. Default is "true". It @@ -4459,8 +5527,6 @@ type ServiceSpec struct { // value), those requests will be respected, regardless of this field. // This field may only be set for services with type LoadBalancer and will // be cleared if the type is changed to any other type. - // This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature. - // +featureGate=ServiceLBNodePortControl // +optional AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"` @@ -4474,19 +5540,28 @@ type ServiceSpec struct { // implementation (e.g. cloud providers) should ignore Services that set this field. // This field can only be set when creating or updating a Service to type 'LoadBalancer'. // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - // +featureGate=LoadBalancerClass // +optional LoadBalancerClass *string `json:"loadBalancerClass,omitempty" protobuf:"bytes,21,opt,name=loadBalancerClass"` - // InternalTrafficPolicy specifies if the cluster internal traffic - // should be routed to all endpoints or node-local endpoints only. - // "Cluster" routes internal traffic to a Service to all endpoints. - // "Local" routes traffic to node-local endpoints only, traffic is - // dropped if no node-local endpoints are ready. - // The default value is "Cluster". - // +featureGate=ServiceInternalTrafficPolicy + // InternalTrafficPolicy describes how nodes distribute service traffic they + // receive on the ClusterIP. If set to "Local", the proxy will assume that pods + // only want to talk to endpoints of the service on the same node as the pod, + // dropping the traffic if there are no local endpoints. The default value, + // "Cluster", uses the standard behavior of routing to all endpoints evenly + // (possibly modified by topology and other features). + // +optional + InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + + // TrafficDistribution offers a way to express preferences for how traffic is + // distributed to Service endpoints. Implementations can use this field as a + // hint, but are not required to guarantee strict adherence. If the field is + // not set, the implementation will apply its default routing strategy. If set + // to "PreferClose", implementations should prioritize endpoints that are + // topologically close (e.g., same zone). 
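The renamed traffic-policy types and the new trafficDistribution field described in this hunk can be used together as in the following illustrative sketch (not part of the vendored file; trafficDistribution additionally requires the ServiceTrafficDistribution feature gate on the cluster):

// Illustrative only: ServiceSpec using the v0.31.x traffic-policy types.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	internal := corev1.ServiceInternalTrafficPolicyLocal
	prefer := corev1.ServiceTrafficDistributionPreferClose
	svc := corev1.ServiceSpec{
		Type:                  corev1.ServiceTypeLoadBalancer,
		Selector:              map[string]string{"app": "web"},
		Ports:                 []corev1.ServicePort{{Name: "http", Port: 80}},
		ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyLocal, // preserve client source IP
		InternalTrafficPolicy: &internal,
		TrafficDistribution:   &prefer, // hint only; implementations may ignore it
	}
	fmt.Printf("%+v\n", svc)
}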
+ // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // +featureGate=ServiceTrafficDistribution // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"` } // ServicePort contains information on service's port. @@ -4506,10 +5581,19 @@ type ServicePort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"` @@ -4544,6 +5628,7 @@ type ServicePort struct { // +genclient // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Service is a named abstraction of software service (for example, mysql) consisting of local port // (for example 3306) that the proxy listens on, and the selector that determines which pods @@ -4575,6 +5660,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceList holds a list of services. type ServiceList struct { @@ -4591,6 +5677,7 @@ type ServiceList struct { // +genclient // +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccount binds together: // * a name, understood by users, and perhaps by peripheral systems, for an identity @@ -4603,11 +5690,16 @@ type ServiceAccount struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. + // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". 
+ // This field should not be used to find auto-generated service account token secrets for use outside of pods. + // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. // More info: https://kubernetes.io/docs/concepts/configuration/secret // +optional // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images @@ -4615,6 +5707,7 @@ type ServiceAccount struct { // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod // +optional + // +listType=atomic ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. @@ -4624,6 +5717,7 @@ type ServiceAccount struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { @@ -4640,6 +5734,7 @@ type ServiceAccountList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Endpoints is a collection of endpoints that implement the actual service. Example: // @@ -4669,6 +5764,7 @@ type Endpoints struct { // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. // +optional + // +listType=atomic Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"` } @@ -4689,14 +5785,17 @@ type EndpointSubset struct { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. // +optional + // +listType=atomic Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` // IP addresses which offer the related ports but are not currently marked as ready // because they have not yet finished starting, have recently failed a readiness check, // or have recently failed a liveness check. // +optional + // +listType=atomic NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` // Port numbers available on the related IP addresses. // +optional + // +listType=atomic Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` } @@ -4704,11 +5803,8 @@ type EndpointSubset struct { // +structType=atomic type EndpointAddress struct { // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. + // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), + // or link-local multicast (224.0.0.0/24 or ff02::/16). 
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // The Hostname of this endpoint // +optional @@ -4741,16 +5837,26 @@ type EndpointPort struct { Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EndpointsList is a list of endpoints. type EndpointsList struct { @@ -4775,6 +5881,7 @@ type NodeSpec struct { // each of IPv4 and IPv6. // +optional // +patchStrategy=merge + // +listType=set PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` // ID of the node assigned by the cloud provider in the format: :// @@ -4786,11 +5893,10 @@ type NodeSpec struct { Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` // If specified, the node's taints. // +optional + // +listType=atomic Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` - // Deprecated. If specified, the source of the node's configuration. - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. - // This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration + // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed. // +optional ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"` @@ -4863,6 +5969,38 @@ type NodeDaemonEndpoints struct { KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` } +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. +type NodeRuntimeHandlerFeatures struct { + // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. + // +featureGate=RecursiveReadOnlyMounts + // +optional + RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"` + // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes. 
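As an illustrative sketch (not part of the vendored file), a consumer could read the per-handler feature flags that kubelet now reports through the runtimeHandlers status field added later in this hunk, using the NodeRuntimeHandler types described above:

// Illustrative only: checking runtime handler features from node status.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// supportsUserNamespaces reports whether the named runtime handler on the node
// advertises user namespace support (empty name means the default handler).
func supportsUserNamespaces(node *corev1.Node, handler string) bool {
	for _, h := range node.Status.RuntimeHandlers {
		if h.Name == handler && h.Features != nil && h.Features.UserNamespaces != nil {
			return *h.Features.UserNamespaces
		}
	}
	return false
}

func main() {
	yes := true
	node := corev1.Node{Status: corev1.NodeStatus{
		RuntimeHandlers: []corev1.NodeRuntimeHandler{
			{Name: "", Features: &corev1.NodeRuntimeHandlerFeatures{UserNamespaces: &yes}},
		},
	}}
	fmt.Println(supportsUserNamespaces(&node, ""))
}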
+ // +featureGate=UserNamespacesSupport + // +optional + UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"` +} + +// NodeRuntimeHandler is a set of runtime handler information. +type NodeRuntimeHandler struct { + // Runtime handler name. + // Empty for the default runtime handler. + // +optional + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Supported features. + // +optional + Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"` +} + +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +type NodeFeatures struct { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"` +} + // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. type NodeSystemInfo struct { // MachineID reported by the node. For unique machine identification @@ -4879,11 +6017,11 @@ type NodeSystemInfo struct { KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + // ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2). ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` // Kubelet Version reported by the node. KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` // The Operating System reported by the node OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` @@ -4941,7 +6079,7 @@ type NodeConfigStatus struct { // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // Allocatable represents the resources of a node that are available for scheduling. @@ -4958,16 +6096,24 @@ type NodeStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. 
Callers should instead - // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. + // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. + // Consumers should assume that addresses can change during the + // lifetime of a Node. However, there are some exceptions where this may not + // be possible, such as Pods that inherit a Node's address in its own status or + // consumers of the downward API (status.hostIP). // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` // Endpoints of daemons running on the Node. // +optional @@ -4978,16 +6124,29 @@ type NodeStatus struct { NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` // List of container images on this node // +optional + // +listType=atomic Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` // List of attachable volumes in use (mounted) by the node. // +optional + // +listType=atomic VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` // List of volumes that are attached to the node. // +optional + // +listType=atomic VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` // Status of the config assigned to the node via the dynamic Kubelet config feature. // +optional Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"` + // The available runtime handlers. + // +featureGate=RecursiveReadOnlyMounts + // +featureGate=UserNamespacesSupport + // +optional + // +listType=atomic + RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"` + // Features describes the set of features implemented by the CRI implementation. + // +featureGate=SupplementalGroupsPolicy + // +optional + Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"` } type UniqueVolumeName string @@ -5008,6 +6167,7 @@ type AvoidPods struct { // Bounded-sized list of signatures of pods that should avoid this node, sorted // in timestamp order from oldest to newest. Size of the slice is unspecified. // +optional + // +listType=atomic PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"` } @@ -5037,8 +6197,9 @@ type PodSignature struct { // Describe a container image type ContainerImage struct { // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + // e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"] // +optional + // +listType=atomic Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` // The size of the image in bytes. // +optional @@ -5061,8 +6222,7 @@ const ( type NodeConditionType string // These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here. -// The built-in set of conditions are: -// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. +// Relevant events contain "NodeReady", "NodeNotReady", "NodeSchedulable", and "NodeNotSchedulable". const ( // NodeReady means kubelet is healthy and ready to accept pods. 
NodeReady NodeConditionType = "Ready" @@ -5164,7 +6324,6 @@ const ( // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) ResourceStorage ResourceName = "storage" // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. ResourceEphemeralStorage ResourceName = "ephemeral-storage" ) @@ -5183,6 +6342,7 @@ type ResourceList map[ResourceName]resource.Quantity // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). @@ -5207,6 +6367,7 @@ type Node struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { @@ -5234,6 +6395,7 @@ type NamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional + // +listType=atomic Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` } @@ -5248,6 +6410,8 @@ type NamespaceStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } @@ -5302,6 +6466,7 @@ type NamespaceCondition struct { // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Namespace provides a scope for Names. // Use of multiple namespaces is optional. @@ -5324,6 +6489,7 @@ type Namespace struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NamespaceList is a list of Namespaces. type NamespaceList struct { @@ -5339,6 +6505,7 @@ type NamespaceList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. // Deprecated in 1.7, please use the bindings subresource of pods instead. @@ -5363,6 +6530,7 @@ type Preconditions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodLogOptions is the query options for a Pod's logs REST call. type PodLogOptions struct { @@ -5415,6 +6583,7 @@ type PodLogOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.1 // PodAttachOptions is the query options to a Pod's remote attach call. // --- @@ -5453,6 +6622,7 @@ type PodAttachOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodExecOptions is the query options to a Pod's remote exec call. 
// --- @@ -5485,11 +6655,13 @@ type PodExecOptions struct { Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` // Command is the remote command to execute. argv array. Not executed within a shell. + // +listType=atomic Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // PodPortForwardOptions is the query options to a Pod's port forward call // when using WebSockets. @@ -5503,11 +6675,13 @@ type PodPortForwardOptions struct { // List of ports to forward // Required when using WebSockets // +optional + // +listType=atomic Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` } // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodProxyOptions is the query options to a Pod's proxy call. type PodProxyOptions struct { @@ -5520,6 +6694,7 @@ type PodProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // NodeProxyOptions is the query options to a Node's proxy call. type NodeProxyOptions struct { @@ -5532,6 +6707,7 @@ type NodeProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ServiceProxyOptions is the query options to a Service's proxy call. type ServiceProxyOptions struct { @@ -5606,9 +6782,15 @@ type ObjectReference struct { // +structType=atomic type LocalObjectReference struct { // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // This field is effectively required, but due to backwards compatibility is + // allowed to be empty. Instances of this type with an empty value here are + // almost certainly wrong. // TODO: Add other useful fields. apiVersion, kind, uid? + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional + // +default="" + // +kubebuilder:default="" + // TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` } @@ -5628,6 +6810,7 @@ type TypedLocalObjectReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SerializedReference is a reference to serialized object. type SerializedReference struct { @@ -5657,6 +6840,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Event is a report of an event somewhere in the cluster. Events // have a limited retention time and triggers and messages may evolve @@ -5741,6 +6925,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EventList is a list of events. 
type EventList struct { @@ -5755,6 +6940,7 @@ type EventList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // List holds a list of objects, which may not be known by the server. type List metav1.List @@ -5796,11 +6982,13 @@ type LimitRangeItem struct { // LimitRangeSpec defines a min/max usage limit for resources that match on kind. type LimitRangeSpec struct { // Limits is the list of LimitRangeItem objects that are enforced. + // +listType=atomic Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRange sets resource usage limits for each kind of resource in a Namespace. type LimitRange struct { @@ -5817,6 +7005,7 @@ type LimitRange struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { @@ -5865,6 +7054,8 @@ const ( ResourceLimitsMemory ResourceName = "limits.memory" // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" + // resource.k8s.io devices requested with a certain DeviceClass, number + ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices" ) // The following identify resource prefix for Kubernetes object types @@ -5892,7 +7083,6 @@ const ( // Match all pod objects that have priority class mentioned ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" // Match all pod objects that have cross-namespace pod (anti)affinity mentioned. - // This is a beta feature enabled by the PodAffinityNamespaceSelector feature flag. ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity" ) @@ -5905,6 +7095,7 @@ type ResourceQuotaSpec struct { // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. // +optional + // +listType=atomic Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota // but expressed using ScopeSelectorOperator in combination with possible values. @@ -5919,6 +7110,7 @@ type ResourceQuotaSpec struct { type ScopeSelector struct { // A list of scope selector requirements by scope of the resources. // +optional + // +listType=atomic MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` } @@ -5935,6 +7127,7 @@ type ScopedResourceSelectorRequirement struct { // the values array must be empty. // This array is replaced during a strategic merge patch. 
// +optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } @@ -5963,6 +7156,7 @@ type ResourceQuotaStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { @@ -5984,6 +7178,7 @@ type ResourceQuota struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { @@ -6000,6 +7195,7 @@ type ResourceQuotaList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. @@ -6126,6 +7322,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SecretList is a list of Secret. type SecretList struct { @@ -6142,6 +7339,7 @@ type SecretList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMap holds configuration data for pods to consume. type ConfigMap struct { @@ -6178,6 +7376,7 @@ type ConfigMap struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { @@ -6220,6 +7419,7 @@ type ComponentCondition struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ComponentStatus (and ComponentStatusList) holds the cluster validation info. // Deprecated: This API is deprecated in v1.19+ @@ -6234,10 +7434,13 @@ type ComponentStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Status of all the conditions for the component as a list of ComponentStatus objects. // Deprecated: This API is deprecated in v1.19+ @@ -6257,6 +7460,7 @@ type ComponentStatusList struct { type DownwardAPIVolumeSource struct { // Items is a list of downward API volume file // +optional + // +listType=atomic Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` // Optional: mode bits to use on created files by default. Must be a // Optional: mode bits used to set permissions on created files by default. @@ -6278,7 +7482,7 @@ const ( type DownwardAPIVolumeFile struct { // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. 
+ // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. // +optional FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` // Selects a resource of the container: only resources limits and requests @@ -6301,6 +7505,7 @@ type DownwardAPIVolumeFile struct { type DownwardAPIProjection struct { // Items is a list of DownwardAPIVolume file // +optional + // +listType=atomic Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` } @@ -6369,7 +7574,7 @@ type SecurityContext struct { // +optional AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. @@ -6381,6 +7586,11 @@ type SecurityContext struct { // Note that this field cannot be set when spec.os.name is windows. // +optional SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,11,opt,name=seccompProfile"` + // appArmorProfile is the AppArmor options to use by this container. If set, this profile + // overrides the pod's appArmorProfile. + // Note that this field cannot be set when spec.os.name is windows. + // +optional + AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,12,opt,name=appArmorProfile"` } // +enum @@ -6434,17 +7644,15 @@ type WindowsSecurityContextOptions struct { RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` // HostProcess determines if a container should be run as a 'Host Process' container. - // This field is alpha-level and will only be honored by components that enable the - // WindowsHostProcessContainers feature flag. Setting this field without the feature - // flag will result in errors when validating the Pod. All of a Pod's containers must - // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess - // containers and non-HostProcess containers). In addition, if HostProcess is true - // then HostNetwork must also be set to true. + // All of a Pod's containers must have the same effective HostProcess value + // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + // In addition, if HostProcess is true then HostNetwork must also be set to true. // +optional HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // RangeAllocation is not a public type. type RangeAllocation struct { @@ -6479,13 +7687,6 @@ type Sysctl struct { Value string `json:"value" protobuf:"bytes,2,opt,name=value"` } -// NodeResources is an object for conveying resource information about a node. -// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details. 
-type NodeResources struct { - // Capacity represents the available resources of a node - Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` -} - const ( // Enable stdin for remote command execution ExecStdinParam = "input" @@ -6520,6 +7721,13 @@ const ( PortForwardRequestIDHeader = "requestID" ) +const ( + // MixedProtocolNotSupported error in PortStatus means that the cloud provider + // can't publish the port on the load balancer because mixed values of protocols + // on the same LoadBalancer type of Service are not supported by the cloud provider. + MixedProtocolNotSupported = "MixedProtocolNotSupported" +) + // PortStatus represents the error condition of a service port type PortStatus struct { @@ -6542,3 +7750,35 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } + +// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP +type LoadBalancerIPMode string + +const ( + // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" + // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and port or the pod's IP and port. + LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" +) + +// ImageVolumeSource represents a image volume resource. +type ImageVolumeSource struct { + // Required: Image or artifact reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` + + // Policy for pulling OCI objects. Possible values are: + // Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"` +} diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 51e782510..950806ef8 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -24,15 +24,15 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_AWSElasticBlockStoreVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", - "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "volumeID": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "fsType": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "partition": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "readOnly": "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", } func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { @@ -50,6 +50,16 @@ func (Affinity) SwaggerDoc() map[string]string { return map_Affinity } +var map_AppArmorProfile = map[string]string{ + "": "AppArmorProfile defines a pod or container's AppArmor settings.", + "type": "type indicates which kind of AppArmor profile will be applied. Valid options are:\n Localhost - a profile pre-loaded on the node.\n RuntimeDefault - the container runtime's default profile.\n Unconfined - no AppArmor enforcement.", + "localhostProfile": "localhostProfile indicates a profile loaded on the node that should be used. The profile must be preconfigured on the node to work. Must match the loaded name of the profile. 
Must be set if and only if type is \"Localhost\".", +} + +func (AppArmorProfile) SwaggerDoc() map[string]string { + return map_AppArmorProfile +} + var map_AttachedVolume = map[string]string{ "": "AttachedVolume describes a volume attached to a node", "name": "Name of the attached volume", @@ -71,12 +81,12 @@ func (AvoidPods) SwaggerDoc() map[string]string { var map_AzureDiskVolumeSource = map[string]string{ "": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "diskName": "The Name of the data disk in the blob storage", - "diskURI": "The URI the data disk in the blob storage", - "cachingMode": "Host Caching mode: None, Read Only, Read Write.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "kind": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + "diskName": "diskName is the Name of the data disk in the blob storage", + "diskURI": "diskURI is the URI of data disk in the blob storage", + "cachingMode": "cachingMode is the Host Caching mode: None, Read Only, Read Write.", + "fsType": "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "kind": "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", } func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { @@ -85,10 +95,10 @@ func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { var map_AzureFilePersistentVolumeSource = map[string]string{ "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "secretName": "the name of secret that contains Azure Storage Account Name and Key", - "shareName": "Share Name", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "secretNamespace": "the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", + "secretName": "secretName is the name of secret that contains Azure Storage Account Name and Key", + "shareName": "shareName is the azure Share Name", + "readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretNamespace": "secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", } func (AzureFilePersistentVolumeSource) SwaggerDoc() map[string]string { @@ -97,9 +107,9 @@ func (AzureFilePersistentVolumeSource) SwaggerDoc() map[string]string { var map_AzureFileVolumeSource = map[string]string{ "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "secretName": "the name of secret that contains Azure Storage Account Name and Key", - "shareName": "Share Name", - "readOnly": "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretName": "secretName is the name of secret that contains Azure Storage Account Name and Key", + "shareName": "shareName is the azure share Name", + "readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", } func (AzureFileVolumeSource) SwaggerDoc() map[string]string { @@ -118,15 +128,16 @@ func (Binding) SwaggerDoc() map[string]string { var map_CSIPersistentVolumeSource = map[string]string{ "": "Represents storage that is managed by an external CSI volume driver (Beta feature)", - "driver": "Driver is the name of the driver to use for this volume. Required.", - "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", - "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", - "volumeAttributes": "Attributes of the volume to publish.", - "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "driver": "driver is the name of the driver to use for this volume. Required.", + "volumeHandle": "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + "readOnly": "readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", + "fsType": "fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", + "volumeAttributes": "volumeAttributes of the volume to publish.", + "controllerPublishSecretRef": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. 
This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -135,11 +146,11 @@ func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CSIVolumeSource = map[string]string{ "": "Represents a source location of a volume to mount, managed by an external CSI driver", - "driver": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", - "readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", - "fsType": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", - "volumeAttributes": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", + "driver": "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "readOnly": "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).", + "fsType": "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "volumeAttributes": "volumeAttributes stores driver-specific properties that are passed to the CSI driver. 
Consult your driver's documentation for supported values.", + "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", } func (CSIVolumeSource) SwaggerDoc() map[string]string { @@ -158,12 +169,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSPersistentVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "monitors": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "path": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "user": "user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -172,12 +183,12 @@ func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "monitors": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "path": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "user": "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -186,10 +197,10 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderPersistentVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", + "volumeID": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "secretRef": "secretRef is Optional: points to a secret object containing parameters used to connect to OpenStack.", } func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -198,10 +209,10 @@ func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", + "volumeID": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "secretRef": "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.", } func (CinderVolumeSource) SwaggerDoc() map[string]string { @@ -217,6 +228,19 @@ func (ClientIPConfig) SwaggerDoc() map[string]string { return map_ClientIPConfig } +var map_ClusterTrustBundleProjection = map[string]string{ + "": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", + "name": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", + "signerName": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.", + "labelSelector": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. 
If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\".", + "optional": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", + "path": "Relative path from the volume root to write the bundle.", +} + +func (ClusterTrustBundleProjection) SwaggerDoc() map[string]string { + return map_ClusterTrustBundleProjection +} + var map_ComponentCondition = map[string]string{ "": "Information about the condition of a component.", "type": "Type of condition for a component. Valid value: \"Healthy\"", @@ -305,8 +329,8 @@ func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", - "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the ConfigMap or its keys must be defined", + "items": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "optional specify whether the ConfigMap or its keys must be defined", } func (ConfigMapProjection) SwaggerDoc() map[string]string { @@ -315,9 +339,9 @@ func (ConfigMapProjection) SwaggerDoc() map[string]string { var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", - "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "defaultMode": "Optional: mode bits used to set permissions on created files by default. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the ConfigMap or its keys must be defined", + "items": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "optional specify whether the ConfigMap or its keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -327,14 +351,16 @@ func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { var map_Container = map[string]string{ "": "A single application container that you want to run within a pod.", "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "image": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "command": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "ports": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "resizePolicy": "Resources resize policy for the container.", + "restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -356,7 +382,7 @@ func (Container) SwaggerDoc() map[string]string { var map_ContainerImage = map[string]string{ "": "Describe a container image", - "names": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "names": "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]", "sizeBytes": "The size of the image in bytes.", } @@ -377,6 +403,16 @@ func (ContainerPort) SwaggerDoc() map[string]string { return map_ContainerPort } +var map_ContainerResizePolicy = map[string]string{ + "": "ContainerResizePolicy represents resource resize policy for the container.", + "resourceName": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "restartPolicy": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", +} + +func (ContainerResizePolicy) SwaggerDoc() map[string]string { + return map_ContainerResizePolicy +} + var map_ContainerState = map[string]string{ "": "ContainerState holds a possible state of container. Only one of its members may be specified. 
If none of them is specified, the default one is ContainerStateWaiting.", "waiting": "Details about a waiting container", @@ -405,7 +441,7 @@ var map_ContainerStateTerminated = map[string]string{ "message": "Message regarding the last termination of the container", "startedAt": "Time at which previous execution of the container started", "finishedAt": "Time at which the container last terminated", - "containerID": "Container's ID in the format 'docker://<container_id>'", + "containerID": "Container's ID in the format '<type>://<container_id>'", } func (ContainerStateTerminated) SwaggerDoc() map[string]string { @@ -423,22 +459,36 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", - "state": "Details about the container's current condition.", - "lastState": "Details about the container's last termination condition.", - "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted.", - "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format 'docker://<container_id>'.", - "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '<type>://<container_id>'. 
Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", + "volumeMounts": "Status of volume mounts.", + "user": "User represents user identity information initially attached to the first process of the container", + "allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.", } func (ContainerStatus) SwaggerDoc() map[string]string { return map_ContainerStatus } +var map_ContainerUser = map[string]string{ + "": "ContainerUser represents user identity information", + "linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. Note that the actual running identity can be changed if the process has enough privilege to do so.", +} + +func (ContainerUser) SwaggerDoc() map[string]string { + return map_ContainerUser +} + var map_DaemonEndpoint = map[string]string{ "": "DaemonEndpoint contains information about a single Daemon endpoint.", "Port": "Port number of the given endpoint.", @@ -460,7 +510,7 @@ func (DownwardAPIProjection) SwaggerDoc() map[string]string { var map_DownwardAPIVolumeFile = map[string]string{ "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", - "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.", "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", "mode": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } @@ -481,8 +531,8 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { var map_EmptyDirVolumeSource = map[string]string{ "": "Represents an empty directory for a pod. 
Empty directory volumes support ownership management and SELinux relabeling.", - "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "sizeLimit": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "medium": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -491,7 +541,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { var map_EndpointAddress = map[string]string{ "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", "hostname": "The Hostname of this endpoint", "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", "targetRef": "Reference to object providing the endpoint.", @@ -506,7 +556,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -579,7 +629,7 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_EphemeralContainer = map[string]string{ - "": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.\n\nThis is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate.", + "": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.", "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.", } @@ -590,14 +640,16 @@ func (EphemeralContainer) SwaggerDoc() map[string]string { var map_EphemeralContainerCommon = map[string]string{ "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", - "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "image": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "command": "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "Ports are not allowed for ephemeral containers.", "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources already allocated to the pod.", + "resizePolicy": "Resources resize policy for the container.", + "restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.", "volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", "livenessProbe": "Probes are not allowed for ephemeral containers.", @@ -690,11 +742,11 @@ func (ExecAction) SwaggerDoc() map[string]string { var map_FCVolumeSource = map[string]string{ "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", - "targetWWNs": "Optional: FC target worldwide names (WWNs)", - "lun": "Optional: FC target lun number", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "wwids": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", + "targetWWNs": "targetWWNs is Optional: FC target worldwide names (WWNs)", + "lun": "lun is Optional: FC target lun number", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "wwids": "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", } func (FCVolumeSource) SwaggerDoc() map[string]string { @@ -703,11 +755,11 @@ func (FCVolumeSource) SwaggerDoc() map[string]string { var map_FlexPersistentVolumeSource = map[string]string{ "": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", - "driver": "Driver is the name of the driver to use for this volume.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", - "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "options": "Optional: Extra command options if any.", + "driver": "driver is the name of the driver to use for this volume.", + "fsType": "fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
The default filesystem depends on FlexVolume script.", + "secretRef": "secretRef is Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "options is Optional: this field holds extra command options if any.", } func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -716,11 +768,11 @@ func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string { var map_FlexVolumeSource = map[string]string{ "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "driver": "Driver is the name of the driver to use for this volume.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", - "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "options": "Optional: Extra command options if any.", + "driver": "driver is the name of the driver to use for this volume.", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "secretRef": "secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "options is Optional: this field holds extra command options if any.", } func (FlexVolumeSource) SwaggerDoc() map[string]string { @@ -729,8 +781,8 @@ func (FlexVolumeSource) SwaggerDoc() map[string]string { var map_FlockerVolumeSource = map[string]string{ "": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", - "datasetName": "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", - "datasetUUID": "UUID of the dataset. This is unique identifier of a Flocker dataset", + "datasetName": "datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", + "datasetUUID": "datasetUUID is the UUID of the dataset. 
This is unique identifier of a Flocker dataset", } func (FlockerVolumeSource) SwaggerDoc() map[string]string { @@ -739,10 +791,10 @@ func (FlockerVolumeSource) SwaggerDoc() map[string]string { var map_GCEPersistentDiskVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", - "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "pdName": "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "fsType": "fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "partition": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "readOnly": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", } func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { @@ -760,9 +812,9 @@ func (GRPCAction) SwaggerDoc() map[string]string { var map_GitRepoVolumeSource = map[string]string{ "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", - "repository": "Repository URL", - "revision": "Commit hash for the specified revision.", - "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. 
Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", + "repository": "repository is the URL", + "revision": "revision is the commit hash for the specified revision.", + "directory": "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", } func (GitRepoVolumeSource) SwaggerDoc() map[string]string { @@ -771,10 +823,10 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsPersistentVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -783,9 +835,9 @@ func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -807,7 +859,7 @@ func (HTTPGetAction) SwaggerDoc() map[string]string { var map_HTTPHeader = map[string]string{ "": "HTTPHeader describes a custom header to be used in HTTP probes", - "name": "The header field name", + "name": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", "value": "The header field value", } @@ -825,10 +877,19 @@ func (HostAlias) SwaggerDoc() map[string]string { return map_HostAlias } +var map_HostIP = map[string]string{ + "": "HostIP represents a single IP address allocated to the host.", + "ip": "IP is the IP address assigned to the host", +} + +func (HostIP) SwaggerDoc() map[string]string { + return map_HostIP +} + var map_HostPathVolumeSource = map[string]string{ "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", - "path": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "type": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "path": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", } func (HostPathVolumeSource) SwaggerDoc() map[string]string { @@ -837,17 +898,17 @@ func (HostPathVolumeSource) SwaggerDoc() map[string]string { var map_ISCSIPersistentVolumeSource = map[string]string{ "": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "iqn": "Target iSCSI Qualified Name.", - "lun": "iSCSI Target Lun number.", - "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - "portals": "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", - "chapAuthSession": "whether support iSCSI Session CHAP authentication", - "secretRef": "CHAP Secret for iSCSI target and initiator authentication", - "initiatorName": "Custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + "targetPortal": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "iqn is Target iSCSI Qualified Name.", + "lun": "lun is iSCSI Target Lun number.", + "iscsiInterface": "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "chapAuthSession defines whether support iSCSI Session CHAP authentication", + "secretRef": "secretRef is the CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", } func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -856,28 +917,38 @@ func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string { var map_ISCSIVolumeSource = map[string]string{ "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "iqn": "Target iSCSI Qualified Name.", - "lun": "iSCSI Target Lun number.", - "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - "portals": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", - "chapAuthSession": "whether support iSCSI Session CHAP authentication", - "secretRef": "CHAP Secret for iSCSI target and initiator authentication", - "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + "targetPortal": "targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "iqn is the target iSCSI Qualified Name.", + "lun": "lun represents iSCSI Target Lun number.", + "iscsiInterface": "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "chapAuthSession defines whether support iSCSI Session CHAP authentication", + "secretRef": "secretRef is the CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", } func (ISCSIVolumeSource) SwaggerDoc() map[string]string { return map_ISCSIVolumeSource } +var map_ImageVolumeSource = map[string]string{ + "": "ImageVolumeSource represents a image volume resource.", + "reference": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", +} + +func (ImageVolumeSource) SwaggerDoc() map[string]string { + return map_ImageVolumeSource +} + var map_KeyToPath = map[string]string{ "": "Maps a string key to a path within a volume.", - "key": "The key to project.", - "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", - "mode": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "key": "key is the key to project.", + "path": "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "mode": "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } func (KeyToPath) SwaggerDoc() map[string]string { @@ -899,6 +970,7 @@ var map_LifecycleHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", + "sleep": "Sleep represents the duration that the container should sleep before being terminated.", } func (LifecycleHandler) SwaggerDoc() map[string]string { @@ -948,10 +1020,22 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string { return map_LimitRangeSpec } +var map_LinuxContainerUser = map[string]string{ + "": "LinuxContainerUser represents user identity information in Linux containers", + "uid": "UID is the primary uid initially attached to the first process in the container", + "gid": "GID is the primary gid initially attached to the first process in the container", + "supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container", +} + +func (LinuxContainerUser) SwaggerDoc() map[string]string { + return map_LinuxContainerUser +} + var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "ipMode": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.", "ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", } @@ -970,7 +1054,7 @@ func (LoadBalancerStatus) SwaggerDoc() map[string]string { var map_LocalObjectReference = map[string]string{ "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "name": "Name of the referent. 
This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } func (LocalObjectReference) SwaggerDoc() map[string]string { @@ -979,19 +1063,29 @@ func (LocalObjectReference) SwaggerDoc() map[string]string { var map_LocalVolumeSource = map[string]string{ "": "Local represents directly-attached storage with node affinity (Beta feature)", - "path": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", - "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.", + "path": "path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", + "fsType": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.", } func (LocalVolumeSource) SwaggerDoc() map[string]string { return map_LocalVolumeSource } +var map_ModifyVolumeStatus = map[string]string{ + "": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation", + "targetVolumeAttributesClassName": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled", + "status": "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.", +} + +func (ModifyVolumeStatus) SwaggerDoc() map[string]string { + return map_ModifyVolumeStatus +} + var map_NFSVolumeSource = map[string]string{ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", - "server": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "path": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "server": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "path": "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "readOnly": "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", } func (NFSVolumeSource) SwaggerDoc() map[string]string { @@ -1123,6 +1217,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { return map_NodeDaemonEndpoints } +var map_NodeFeatures = map[string]string{ + "": "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", + "supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", +} + +func (NodeFeatures) SwaggerDoc() map[string]string { + return map_NodeFeatures +} + var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -1142,13 +1245,24 @@ func (NodeProxyOptions) SwaggerDoc() map[string]string { return map_NodeProxyOptions } -var map_NodeResources = map[string]string{ - "": "NodeResources is an object for conveying resource information about a node. see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.", - "Capacity": "Capacity represents the available resources of a node", +var map_NodeRuntimeHandler = map[string]string{ + "": "NodeRuntimeHandler is a set of runtime handler information.", + "name": "Runtime handler name. Empty for the default runtime handler.", + "features": "Supported features.", +} + +func (NodeRuntimeHandler) SwaggerDoc() map[string]string { + return map_NodeRuntimeHandler } -func (NodeResources) SwaggerDoc() map[string]string { - return map_NodeResources +var map_NodeRuntimeHandlerFeatures = map[string]string{ + "": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", + "recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", + "userNamespaces": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.", +} + +func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string { + return map_NodeRuntimeHandlerFeatures } var map_NodeSelector = map[string]string{ @@ -1188,7 +1302,7 @@ var map_NodeSpec = map[string]string{ "providerID": "ID of the node assigned by the cloud provider in the format: ://", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", - "configSource": "Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration", + "configSource": "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.", "externalID": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. 
see: https://issues.k8s.io/61966", } @@ -1198,17 +1312,19 @@ func (NodeSpec) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", "volumesInUse": "List of attachable volumes in use (mounted) by the node.", "volumesAttached": "List of volumes that are attached to the node.", "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.", + "runtimeHandlers": "The available runtime handlers.", + "features": "Features describes the set of features implemented by the CRI implementation.", } func (NodeStatus) SwaggerDoc() map[string]string { @@ -1222,9 +1338,9 @@ var map_NodeSystemInfo = map[string]string{ "bootID": "Boot ID reported by the node.", "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", - "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).", + "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. 
containerd://1.4.2).", "kubeletVersion": "Kubelet Version reported by the node.", - "kubeProxyVersion": "KubeProxy Version reported by the node.", + "kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.", "operatingSystem": "The Operating System reported by the node", "architecture": "The Architecture reported by the node", } @@ -1261,8 +1377,8 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_PersistentVolume = map[string]string{ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", - "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + "spec": "spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + "status": "status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", } func (PersistentVolume) SwaggerDoc() map[string]string { @@ -1272,8 +1388,8 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "spec": "spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "status": "status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } func (PersistentVolumeClaim) SwaggerDoc() map[string]string { @@ -1281,11 +1397,11 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimCondition = map[string]string{ - "": "PersistentVolumeClaimCondition contails details about state of pvc", - "lastProbeTime": "Last time we probed the condition.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", - "message": "Human-readable message indicating details about last transition.", + "": "PersistentVolumeClaimCondition contains details about state of pvc", + "lastProbeTime": "lastProbeTime is the time we probed the condition.", + "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", + "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"Resizing\" that means the underlying persistent volume is being resized.", + "message": "message is the human-readable message indicating details about last transition.", } func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { @@ -1295,7 +1411,7 @@ func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "items": "items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { @@ -1303,15 +1419,16 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimSpec = map[string]string{ - "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "selector": "A label query over volumes to consider for binding.", - "resources": "Resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", - "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", - "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.", - "dataSourceRef": "Specifies the object from which to populate the volume with data, if a non-empty volume is desired. 
This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.", + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "selector": "selector is a label query over volumes to consider for binding.", + "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", + "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1319,13 +1436,15 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "Phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "capacity": "Represents the actual resources of the underlying volume.", - "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", - "allocatedResources": "The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. 
This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "resizeStatus": "ResizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "capacity represents the actual resources of the underlying volume.", + "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.", + "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. 
Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", + "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1344,8 +1463,8 @@ func (PersistentVolumeClaimTemplate) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimVolumeSource = map[string]string{ "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", - "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.", + "claimName": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "readOnly": "readOnly Will force the ReadOnly setting in VolumeMounts. Default false.", } func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { @@ -1355,7 +1474,7 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "items": "items is a list of persistent volumes. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", } func (PersistentVolumeList) SwaggerDoc() map[string]string { @@ -1364,28 +1483,28 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string { var map_PersistentVolumeSource = map[string]string{ "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", - "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "local": "Local represents directly-attached storage with node affinity", - "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", - "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", + "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "hostPath": "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "nfs": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", + "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running", + "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "local": "local represents directly-attached storage with node affinity", + "storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", + "csi": "csi represents storage that is handled by an external CSI driver (Beta feature).", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1394,14 +1513,15 @@ func (PersistentVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeSpec = map[string]string{ "": "PersistentVolumeSpec is the specification of a persistent volume.", - "capacity": "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", - "accessModes": "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", - "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", - "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", - "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", - "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + "capacity": "capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "accessModes": "accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", + "claimRef": "claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. 
claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", + "persistentVolumeReclaimPolicy": "persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", + "storageClassName": "storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", + "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", - "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", + "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1409,10 +1529,11 @@ func (PersistentVolumeSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeStatus = map[string]string{ - "": "PersistentVolumeStatus is the current status of a persistent volume.", - "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", - "message": "A human-readable message indicating details about why the volume is in this state.", - "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "": "PersistentVolumeStatus is the current status of a persistent volume.", + "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", + "message": "message is a human-readable message indicating details about why the volume is in this state.", + "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { @@ -1421,8 +1542,8 @@ func (PersistentVolumeStatus) SwaggerDoc() map[string]string { var map_PhotonPersistentDiskVolumeSource = map[string]string{ "": "Represents a Photon Controller persistent disk resource.", - "pdID": "ID that identifies Photon Controller persistent disk", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "pdID": "pdID is the ID that identifies Photon Controller persistent disk", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", } func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { @@ -1452,10 +1573,12 @@ func (PodAffinity) SwaggerDoc() map[string]string { var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", - "labelSelector": "A label query over a set of resources, in this case pods.", - "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"", + "labelSelector": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.", + "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", - "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. 
This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.", + "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1534,8 +1657,8 @@ func (PodExecOptions) SwaggerDoc() map[string]string { } var map_PodIP = map[string]string{ - "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.", - "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", + "": "PodIP represents a single IP address allocated to the pod.", + "ip": "IP is the IP address assigned to the pod", } func (PodIP) SwaggerDoc() map[string]string { @@ -1605,18 +1728,50 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { return map_PodReadinessGate } +var map_PodResourceClaim = map[string]string{ + "": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", + "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. 
When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", +} + +func (PodResourceClaim) SwaggerDoc() map[string]string { + return map_PodResourceClaim +} + +var map_PodResourceClaimStatus = map[string]string{ + "": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.", + "name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.", + "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", +} + +func (PodResourceClaimStatus) SwaggerDoc() map[string]string { + return map_PodResourceClaimStatus +} + +var map_PodSchedulingGate = map[string]string{ + "": "PodSchedulingGate is associated to a Pod to guard its scheduling.", + "name": "Name of the scheduling gate. Each scheduling gate must have a unique name field.", +} + +func (PodSchedulingGate) SwaggerDoc() map[string]string { + return map_PodSchedulingGate +} + var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. Note that this field cannot be set when spec.os.name is windows.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", - "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
Note that this field cannot be set when spec.os.name is windows.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.", + "supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1637,39 +1792,42 @@ var map_PodSpec = map[string]string{ "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. 
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", - "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", - "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename", "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", "subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", "affinity": "If specified, the pod's scheduling constraints", "schedulerName": "If specified, the pod will be dispatched by specified scheduler. 
If not specified, the pod will be dispatched by default scheduler.", "tolerations": "If specified, the pod's tolerations.", - "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", + "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified.", "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. 
If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", - "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup This is an alpha field and requires the IdentifyPodOS feature", + "os": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", + "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", + "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1683,14 +1841,17 @@ var map_PodStatus = map[string]string{ "message": "A human readable message indicating details about why the pod is in this condition.", "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. 
Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod", + "hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.", + "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", - "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.", + "containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.", + "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", + "resourceClaimStatuses": "Status of resource claims.", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1749,9 +1910,9 @@ func (PortStatus) SwaggerDoc() map[string]string { var map_PortworxVolumeSource = map[string]string{ "": "PortworxVolumeSource represents a Portworx volume resource.", - "volumeID": "VolumeID uniquely identifies a Portworx volume", - "fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. 
\"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "volumeID": "volumeID uniquely identifies a Portworx volume", + "fsType": "fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", } func (PortworxVolumeSource) SwaggerDoc() map[string]string { @@ -1808,7 +1969,7 @@ var map_ProbeHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "TCPSocket specifies an action involving a TCP port.", - "grpc": "GRPC specifies an action involving a GRPC port. This is an alpha field and requires enabling GRPCContainerProbe feature gate.", + "grpc": "GRPC specifies an action involving a GRPC port.", } func (ProbeHandler) SwaggerDoc() map[string]string { @@ -1817,8 +1978,8 @@ func (ProbeHandler) SwaggerDoc() map[string]string { var map_ProjectedVolumeSource = map[string]string{ "": "Represents a projected volume source", - "sources": "list of volume projections", - "defaultMode": "Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "sources": "sources is the list of volume projections. Each entry in this list handles one source.", + "defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } func (ProjectedVolumeSource) SwaggerDoc() map[string]string { @@ -1827,12 +1988,12 @@ func (ProjectedVolumeSource) SwaggerDoc() map[string]string { var map_QuobyteVolumeSource = map[string]string{ "": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", - "registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", - "volume": "Volume is a string that references an already created Quobyte volume by name.", - "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false.", - "user": "User to map volume access to Defaults to serivceaccount user", - "group": "Group to map volume access to Default is no group", - "tenant": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", + "registry": "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "volume": "volume is a string that references an already created Quobyte volume by name.", + "readOnly": "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "user": "user to map volume access to Defaults to serivceaccount user", + "group": "group to map volume access to Default is no group", + "tenant": "tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", } func (QuobyteVolumeSource) SwaggerDoc() map[string]string { @@ -1841,14 +2002,14 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { var map_RBDPersistentVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "monitors": "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "fsType": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "pool": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "user is the rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1857,14 +2018,14 @@ func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "monitors": "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "fsType": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "pool": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1921,7 +2082,7 @@ var map_ReplicationControllerSpec = map[string]string{ "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } func (ReplicationControllerSpec) SwaggerDoc() map[string]string { @@ -1930,7 +2091,7 @@ func (ReplicationControllerSpec) SwaggerDoc() map[string]string { var map_ReplicationControllerStatus = map[string]string{ "": "ReplicationControllerStatus represents the current status of a replication controller.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", "readyReplicas": "The number of ready replicas for this replication controller.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", @@ -1942,6 +2103,16 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string { return map_ReplicationControllerStatus } +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "request": "Request is the name chosen for a request in the referenced claim. 
If empty, everything from the claim is made available, otherwise only the result of this request.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + var map_ResourceFieldSelector = map[string]string{ "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", "containerName": "Container name: required for volumes, optional for env vars", @@ -1953,6 +2124,16 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { return map_ResourceFieldSelector } +var map_ResourceHealth = map[string]string{ + "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.", + "resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.", + "health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.", +} + +func (ResourceHealth) SwaggerDoc() map[string]string { + return map_ResourceHealth +} + var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1998,13 +2179,23 @@ func (ResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceRequirements = map[string]string{ "": "ResourceRequirements describes the compute resource requirements.", "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "claims": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", } func (ResourceRequirements) SwaggerDoc() map[string]string { return map_ResourceRequirements } +var map_ResourceStatus = map[string]string{ + "name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.", + "resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. 
At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.", +} + +func (ResourceStatus) SwaggerDoc() map[string]string { + return map_ResourceStatus +} + var map_SELinuxOptions = map[string]string{ "": "SELinuxOptions are the labels to be applied to the container", "user": "User is a SELinux user label that applies to the container.", @@ -2019,16 +2210,16 @@ func (SELinuxOptions) SwaggerDoc() map[string]string { var map_ScaleIOPersistentVolumeSource = map[string]string{ "": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", - "gateway": "The host address of the ScaleIO API Gateway.", - "system": "The name of the storage system as configured in ScaleIO.", - "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", - "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", - "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", - "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", - "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", - "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "gateway": "gateway is the host address of the ScaleIO API Gateway.", + "system": "system is the name of the storage system as configured in ScaleIO.", + "secretRef": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "sslEnabled is the flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + "storagePool": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", + "storageMode": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "volumeName": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"", + "readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", } func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -2037,16 +2228,16 @@ func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string { var map_ScaleIOVolumeSource = map[string]string{ "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", - "gateway": "The host address of the ScaleIO API Gateway.", - "system": "The name of the storage system as configured in ScaleIO.", - "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. 
If this is not provided, Login operation will fail.", - "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", - "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", - "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", - "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", - "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "gateway": "gateway is the host address of the ScaleIO API Gateway.", + "system": "system is the name of the storage system as configured in ScaleIO.", + "secretRef": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "sslEnabled Flag enable/disable SSL communication with Gateway, default false", + "protectionDomain": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + "storagePool": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", + "storageMode": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "volumeName": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", + "readOnly": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", } func (ScaleIOVolumeSource) SwaggerDoc() map[string]string { @@ -2076,7 +2267,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_SeccompProfile = map[string]string{ "": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.", "type": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", - "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".", + "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". 
Must NOT be set for any other type.", } func (SeccompProfile) SwaggerDoc() map[string]string { @@ -2127,8 +2318,8 @@ func (SecretList) SwaggerDoc() map[string]string { var map_SecretProjection = map[string]string{ "": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", - "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the Secret or its key must be defined", + "items": "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "optional field specify whether the Secret or its key must be defined", } func (SecretProjection) SwaggerDoc() map[string]string { @@ -2137,8 +2328,8 @@ func (SecretProjection) SwaggerDoc() map[string]string { var map_SecretReference = map[string]string{ "": "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace", - "name": "Name is unique within a namespace to reference a secret resource.", - "namespace": "Namespace defines the space within which the secret name must be unique.", + "name": "name is unique within a namespace to reference a secret resource.", + "namespace": "namespace defines the space within which the secret name must be unique.", } func (SecretReference) SwaggerDoc() map[string]string { @@ -2147,10 +2338,10 @@ func (SecretReference) SwaggerDoc() map[string]string { var map_SecretVolumeSource = map[string]string{ "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", - "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "defaultMode": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or its keys must be defined", + "secretName": "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "items": "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "optional field specify whether the Secret or its keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -2168,8 +2359,9 @@ var map_SecurityContext = map[string]string{ "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", - "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "procMount": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. 
Note that this field cannot be set when spec.os.name is windows.", + "appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.", } func (SecurityContext) SwaggerDoc() map[string]string { @@ -2199,7 +2391,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", } @@ -2220,9 +2412,9 @@ func (ServiceAccountList) SwaggerDoc() map[string]string { var map_ServiceAccountTokenProjection = map[string]string{ "": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", - "audience": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", - "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", - "path": "Path is the path relative to the mount point of the file to project the token into.", + "audience": "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. 
The audience defaults to the identifier of the apiserver.", + "expirationSeconds": "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", + "path": "path is the path relative to the mount point of the file to project the token into.", } func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { @@ -2243,7 +2435,7 @@ var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", @@ -2271,18 +2463,19 @@ var map_ServiceSpec = map[string]string{ "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.", "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/", "externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", - "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. 
\"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", - "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).", + "externalTrafficPolicy": "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set.", "publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", "ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. 
This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", "ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", - "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.", + "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. 
This field will be wiped when a service is updated to a non 'LoadBalancer' type.", - "internalTrafficPolicy": "InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \"Cluster\" routes internal traffic to a Service to all endpoints. \"Local\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \"Cluster\".", + "internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", + "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2308,13 +2501,22 @@ func (SessionAffinityConfig) SwaggerDoc() map[string]string { return map_SessionAffinityConfig } +var map_SleepAction = map[string]string{ + "": "SleepAction describes a \"sleep\" action.", + "seconds": "Seconds is the number of seconds to sleep.", +} + +func (SleepAction) SwaggerDoc() map[string]string { + return map_SleepAction +} + var map_StorageOSPersistentVolumeSource = map[string]string{ "": "Represents a StorageOS persistent volume resource.", - "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", - "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", + "volumeName": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. 
Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", } func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -2323,11 +2525,11 @@ func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string { var map_StorageOSVolumeSource = map[string]string{ "": "Represents a StorageOS persistent volume resource.", - "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", - "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", + "volumeName": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", } func (StorageOSVolumeSource) SwaggerDoc() map[string]string { @@ -2399,11 +2601,15 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { } var map_TopologySpreadConstraint = map[string]string{ - "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", - "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", - "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", - "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", - "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", + "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. ", + "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", + "minDomains": "MinDomains indicates a minimum number of eligible domains. 
When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ", + "nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", } func (TopologySpreadConstraint) SwaggerDoc() map[string]string { @@ -2421,9 +2627,20 @@ func (TypedLocalObjectReference) SwaggerDoc() map[string]string { return map_TypedLocalObjectReference } +var map_TypedObjectReference = map[string]string{ + "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", + "namespace": "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. 
(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", +} + +func (TypedObjectReference) SwaggerDoc() map[string]string { + return map_TypedObjectReference +} + var map_Volume = map[string]string{ "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", - "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "name": "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } func (Volume) SwaggerDoc() map[string]string { @@ -2441,22 +2658,35 @@ func (VolumeDevice) SwaggerDoc() map[string]string { } var map_VolumeMount = map[string]string{ - "": "VolumeMount describes a mounting of a Volume within a container.", - "name": "This must match the Name of a Volume.", - "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", - "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", - "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", + "": "VolumeMount describes a mounting of a Volume within a container.", + "name": "This must match the Name of a Volume.", + "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "recursiveReadOnly": "RecursiveReadOnly specifies whether read-only mounts should be handled recursively.\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason.\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None).\n\nIf this field is not specified, it is treated as an equivalent of Disabled.", + "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", } func (VolumeMount) SwaggerDoc() map[string]string { return map_VolumeMount } +var map_VolumeMountStatus = map[string]string{ + "": "VolumeMountStatus shows status of volume mounts.", + "name": "Name corresponds to the name of the original VolumeMount.", + "mountPath": "MountPath corresponds to the original VolumeMount.", + "readOnly": "ReadOnly corresponds to the original VolumeMount.", + "recursiveReadOnly": "RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, depending on the mount result.", +} + +func (VolumeMountStatus) SwaggerDoc() map[string]string { + return map_VolumeMountStatus +} + var map_VolumeNodeAffinity = map[string]string{ "": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", - "required": "Required specifies hard node constraints that must be met.", + "required": "required specifies hard node constraints that must be met.", } func (VolumeNodeAffinity) SwaggerDoc() map[string]string { @@ -2464,48 +2694,60 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string { } var map_VolumeProjection = map[string]string{ - "": "Projection that may be projected along with other supported volume types", - "secret": "information about the secret data to project", - "downwardAPI": "information about the downwardAPI data to project", - "configMap": "information about the configMap data to project", - "serviceAccountToken": "information about the serviceAccountToken data to project", + "": "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.", + "secret": "secret information about the secret data to project", + "downwardAPI": "downwardAPI information about the downwardAPI data to project", + "configMap": "configMap information about the configMap data to project", + "serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project", + "clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.", } func (VolumeProjection) SwaggerDoc() map[string]string { return map_VolumeProjection } +var map_VolumeResourceRequirements = map[string]string{ + "": "VolumeResourceRequirements describes the storage resource requirements for a volume.", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", +} + +func (VolumeResourceRequirements) SwaggerDoc() map[string]string { + return map_VolumeResourceRequirements +} + var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", - "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", - "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", - "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "projected": "Items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", - "csi": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", - "ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + "hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "emptyDir": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gitRepo": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "secret": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "downwardAPI": "downwardAPI represents downward API about the pod that should populate this volume", + "fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "configMap represents a configMap that should populate this volume", + "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "projected items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + "ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. 
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", } func (VolumeSource) SwaggerDoc() map[string]string { @@ -2514,10 +2756,10 @@ func (VolumeSource) SwaggerDoc() map[string]string { var map_VsphereVirtualDiskVolumeSource = map[string]string{ "": "Represents a vSphere volume resource.", - "volumePath": "Path that identifies vSphere volume vmdk", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "storagePolicyName": "Storage Policy Based Management (SPBM) profile name.", - "storagePolicyID": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + "volumePath": "volumePath is the path that identifies vSphere volume vmdk", + "fsType": "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "storagePolicyName": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.", + "storagePolicyID": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", } func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { @@ -2539,7 +2781,7 @@ var map_WindowsSecurityContextOptions = map[string]string{ "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.", + "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.", } func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go index 5cf82a981..8c3cb87b8 100644 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -19,6 +19,10 @@ package v1 const ( LabelHostname = "kubernetes.io/hostname" + // Label value is the network location of kube-apiserver stored as + // Stored in APIServer Identity lease objects to view what address is used for peer proxy + AnnotationPeerAdvertiseAddress = "kubernetes.io/peer-advertise-address" + LabelTopologyZone = "topology.kubernetes.io/zone" LabelTopologyRegion = "topology.kubernetes.io/region" diff --git a/vendor/k8s.io/api/core/v1/well_known_taints.go b/vendor/k8s.io/api/core/v1/well_known_taints.go index 84d268197..a6d8c272b 100644 --- a/vendor/k8s.io/api/core/v1/well_known_taints.go +++ b/vendor/k8s.io/api/core/v1/well_known_taints.go @@ -45,4 +45,8 @@ const ( // TaintNodePIDPressure will be added when node has pid pressure // and removed when node has enough pid. 
TaintNodePIDPressure = "node.kubernetes.io/pid-pressure" + + // TaintNodeOutOfService can be added when node is out of service in case of + // a non-graceful shutdown + TaintNodeOutOfService = "node.kubernetes.io/out-of-service" ) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index fc951ad44..3d23f7f62 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -74,6 +74,27 @@ func (in *Affinity) DeepCopy() *Affinity { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppArmorProfile) DeepCopyInto(out *AppArmorProfile) { + *out = *in + if in.LocalhostProfile != nil { + in, out := &in.LocalhostProfile, &out.LocalhostProfile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppArmorProfile. +func (in *AppArmorProfile) DeepCopy() *AppArmorProfile { + if in == nil { + return nil + } + out := new(AppArmorProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { *out = *in @@ -243,6 +264,11 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } + if in.NodeExpandSecretRef != nil { + in, out := &in.NodeExpandSecretRef, &out.NodeExpandSecretRef + *out = new(SecretReference) + **out = **in + } return } @@ -435,6 +461,42 @@ func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) { + *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.SignerName != nil { + in, out := &in.SignerName, &out.SignerName + *out = new(string) + **out = **in + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection. +func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection { + if in == nil { + return nil + } + out := new(ClusterTrustBundleProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { *out = *in @@ -757,6 +819,16 @@ func (in *Container) DeepCopyInto(out *Container) { } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } + if in.RestartPolicy != nil { + in, out := &in.RestartPolicy, &out.RestartPolicy + *out = new(ContainerRestartPolicy) + **out = **in + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -844,6 +916,22 @@ func (in *ContainerPort) DeepCopy() *ContainerPort { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy. +func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { + if in == nil { + return nil + } + out := new(ContainerResizePolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerState) DeepCopyInto(out *ContainerState) { *out = *in @@ -936,6 +1024,37 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = new(bool) **out = **in } + if in.AllocatedResources != nil { + in, out := &in.AllocatedResources, &out.AllocatedResources + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMountStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(ContainerUser) + (*in).DeepCopyInto(*out) + } + if in.AllocatedResourcesStatus != nil { + in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus + *out = make([]ResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -949,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerUser) DeepCopyInto(out *ContainerUser) { + *out = *in + if in.Linux != nil { + in, out := &in.Linux, &out.Linux + *out = new(LinuxContainerUser) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser. +func (in *ContainerUser) DeepCopy() *ContainerUser { + if in == nil { + return nil + } + out := new(ContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { *out = *in @@ -1351,6 +1491,16 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) } } in.Resources.DeepCopyInto(&out.Resources) + if in.ResizePolicy != nil { + in, out := &in.ResizePolicy, &out.ResizePolicy + *out = make([]ContainerResizePolicy, len(*in)) + copy(*out, *in) + } + if in.RestartPolicy != nil { + in, out := &in.RestartPolicy, &out.RestartPolicy + *out = new(ContainerRestartPolicy) + **out = **in + } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts *out = make([]VolumeMount, len(*in)) @@ -1802,6 +1952,22 @@ func (in *HostAlias) DeepCopy() *HostAlias { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostIP) DeepCopyInto(out *HostIP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostIP. +func (in *HostIP) DeepCopy() *HostIP { + if in == nil { + return nil + } + out := new(HostIP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { *out = *in @@ -1885,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource. +func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource { + if in == nil { + return nil + } + out := new(ImageVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { *out = *in @@ -1950,6 +2132,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { *out = new(TCPSocketAction) **out = **in } + if in.Sleep != nil { + in, out := &in.Sleep, &out.Sleep + *out = new(SleepAction) + **out = **in + } return } @@ -2097,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) { + *out = *in + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser. +func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser { + if in == nil { + return nil + } + out := new(LinuxContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *List) DeepCopyInto(out *List) { *out = *in @@ -2133,6 +2341,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.IPMode != nil { + in, out := &in.IPMode, &out.IPMode + *out = new(LoadBalancerIPMode) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) @@ -2213,6 +2426,22 @@ func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus. +func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus { + if in == nil { + return nil + } + out := new(ModifyVolumeStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { *out = *in @@ -2510,6 +2739,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) { + *out = *in + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures. +func (in *NodeFeatures) DeepCopy() *NodeFeatures { + if in == nil { + return nil + } + out := new(NodeFeatures) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in @@ -2569,24 +2819,48 @@ func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResources) DeepCopyInto(out *NodeResources) { +func (in *NodeRuntimeHandler) DeepCopyInto(out *NodeRuntimeHandler) { *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(NodeRuntimeHandlerFeatures) + (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. -func (in *NodeResources) DeepCopy() *NodeResources { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandler. +func (in *NodeRuntimeHandler) DeepCopy() *NodeRuntimeHandler { if in == nil { return nil } - out := new(NodeResources) + out := new(NodeRuntimeHandler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatures) { + *out = *in + if in.RecursiveReadOnlyMounts != nil { + in, out := &in.RecursiveReadOnlyMounts, &out.RecursiveReadOnlyMounts + *out = new(bool) + **out = **in + } + if in.UserNamespaces != nil { + in, out := &in.UserNamespaces, &out.UserNamespaces + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandlerFeatures. +func (in *NodeRuntimeHandlerFeatures) DeepCopy() *NodeRuntimeHandlerFeatures { + if in == nil { + return nil + } + out := new(NodeRuntimeHandlerFeatures) in.DeepCopyInto(out) return out } @@ -2751,6 +3025,18 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { *out = new(NodeConfigStatus) (*in).DeepCopyInto(*out) } + if in.RuntimeHandlers != nil { + in, out := &in.RuntimeHandlers, &out.RuntimeHandlers + *out = make([]NodeRuntimeHandler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(NodeFeatures) + (*in).DeepCopyInto(*out) + } return } @@ -2826,7 +3112,7 @@ func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -2958,9 +3244,14 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec } if in.DataSourceRef != nil { in, out := &in.DataSourceRef, &out.DataSourceRef - *out = new(TypedLocalObjectReference) + *out = new(TypedObjectReference) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3003,9 +3294,21 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*out)[key] = val.DeepCopy() } } - if in.ResizeStatus != nil { - in, out := &in.ResizeStatus, &out.ResizeStatus - *out = new(PersistentVolumeClaimResizeStatus) + if in.AllocatedResourceStatuses != nil { + in, out := &in.AllocatedResourceStatuses, &out.AllocatedResourceStatuses + *out = make(map[ResourceName]ClaimResourceStatus, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CurrentVolumeAttributesClassName != nil { + in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName + *out = new(string) + **out = **in + } + if in.ModifyVolumeStatus != nil { + in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus + *out = new(ModifyVolumeStatus) **out = **in } return @@ -3250,6 +3553,11 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = new(VolumeNodeAffinity) (*in).DeepCopyInto(*out) } + if in.VolumeAttributesClassName != nil { + in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName + *out = new(string) + **out = **in + } return } @@ -3266,6 +3574,10 @@ func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) { *out = *in + if in.LastPhaseTransitionTime != nil { + in, out := &in.LastPhaseTransitionTime, &out.LastPhaseTransitionTime + *out = (*in).DeepCopy() + } return } @@ -3371,6 +3683,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MismatchLabelKeys != nil { + in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -3721,6 +4043,69 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) { + *out = *in + if in.ResourceClaimName != nil { + in, out := &in.ResourceClaimName, &out.ResourceClaimName + *out = new(string) + **out = **in + } + if in.ResourceClaimTemplateName != nil { + in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim. +func (in *PodResourceClaim) DeepCopy() *PodResourceClaim { + if in == nil { + return nil + } + out := new(PodResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodResourceClaimStatus) DeepCopyInto(out *PodResourceClaimStatus) { + *out = *in + if in.ResourceClaimName != nil { + in, out := &in.ResourceClaimName, &out.ResourceClaimName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaimStatus. +func (in *PodResourceClaimStatus) DeepCopy() *PodResourceClaimStatus { + if in == nil { + return nil + } + out := new(PodResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingGate. +func (in *PodSchedulingGate) DeepCopy() *PodSchedulingGate { + if in == nil { + return nil + } + out := new(PodSchedulingGate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = *in @@ -3754,6 +4139,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = make([]int64, len(*in)) copy(*out, *in) } + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(SupplementalGroupsPolicy) + **out = **in + } if in.FSGroup != nil { in, out := &in.FSGroup, &out.FSGroup *out = new(int64) @@ -3774,6 +4164,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(SeccompProfile) (*in).DeepCopyInto(*out) } + if in.AppArmorProfile != nil { + in, out := &in.AppArmorProfile, &out.AppArmorProfile + *out = new(AppArmorProfile) + (*in).DeepCopyInto(*out) + } return } @@ -3949,6 +4344,23 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(PodOS) **out = **in } + if in.HostUsers != nil { + in, out := &in.HostUsers, &out.HostUsers + *out = new(bool) + **out = **in + } + if in.SchedulingGates != nil { + in, out := &in.SchedulingGates, &out.SchedulingGates + *out = make([]PodSchedulingGate, len(*in)) + copy(*out, *in) + } + if in.ResourceClaims != nil { + in, out := &in.ResourceClaims, &out.ResourceClaims + *out = make([]PodResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -3972,6 +4384,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.HostIPs != nil { + in, out := &in.HostIPs, &out.HostIPs + *out = make([]HostIP, len(*in)) + copy(*out, *in) + } if in.PodIPs != nil { in, out := &in.PodIPs, &out.PodIPs *out = make([]PodIP, len(*in)) @@ -4002,6 +4419,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ResourceClaimStatuses != nil { + in, out := &in.ResourceClaimStatuses, &out.ResourceClaimStatuses + *out = make([]PodResourceClaimStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -4532,6 +4956,22 @@ func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. +func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { *out = *in @@ -4549,6 +4989,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth. +func (in *ResourceHealth) DeepCopy() *ResourceHealth { + if in == nil { + return nil + } + out := new(ResourceHealth) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in ResourceList) DeepCopyInto(out *ResourceList) { { @@ -4712,6 +5168,11 @@ func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { (*out)[key] = val.DeepCopy() } } + if in.Claims != nil { + in, out := &in.Claims, &out.Claims + *out = make([]ResourceClaim, len(*in)) + copy(*out, *in) + } return } @@ -4725,6 +5186,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceHealth, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. +func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { *out = *in @@ -5114,6 +5596,11 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = new(SeccompProfile) (*in).DeepCopyInto(*out) } + if in.AppArmorProfile != nil { + in, out := &in.AppArmorProfile, &out.AppArmorProfile + *out = new(AppArmorProfile) + (*in).DeepCopyInto(*out) + } return } @@ -5400,7 +5887,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.IPFamilyPolicy != nil { in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy - *out = new(IPFamilyPolicyType) + *out = new(IPFamilyPolicy) **out = **in } if in.AllocateLoadBalancerNodePorts != nil { @@ -5415,7 +5902,12 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { } if in.InternalTrafficPolicy != nil { in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicyType) + *out = new(ServiceInternalTrafficPolicy) + **out = **in + } + if in.TrafficDistribution != nil { + in, out := &in.TrafficDistribution, &out.TrafficDistribution + *out = new(string) **out = **in } return @@ -5476,6 +5968,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SleepAction) DeepCopyInto(out *SleepAction) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction. +func (in *SleepAction) DeepCopy() *SleepAction { + if in == nil { + return nil + } + out := new(SleepAction) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { *out = *in @@ -5644,6 +6152,26 @@ func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.MinDomains != nil { + in, out := &in.MinDomains, &out.MinDomains + *out = new(int32) + **out = **in + } + if in.NodeAffinityPolicy != nil { + in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy + *out = new(NodeInclusionPolicy) + **out = **in + } + if in.NodeTaintsPolicy != nil { + in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy + *out = new(NodeInclusionPolicy) + **out = **in + } + if in.MatchLabelKeys != nil { + in, out := &in.MatchLabelKeys, &out.MatchLabelKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -5678,6 +6206,32 @@ func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference. +func (in *TypedObjectReference) DeepCopy() *TypedObjectReference { + if in == nil { + return nil + } + out := new(TypedObjectReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Volume) DeepCopyInto(out *Volume) { *out = *in @@ -5714,6 +6268,11 @@ func (in *VolumeDevice) DeepCopy() *VolumeDevice { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in + if in.RecursiveReadOnly != nil { + in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly + *out = new(RecursiveReadOnlyMode) + **out = **in + } if in.MountPropagation != nil { in, out := &in.MountPropagation, &out.MountPropagation *out = new(MountPropagationMode) @@ -5732,6 +6291,27 @@ func (in *VolumeMount) DeepCopy() *VolumeMount { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMountStatus) DeepCopyInto(out *VolumeMountStatus) { + *out = *in + if in.RecursiveReadOnly != nil { + in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly + *out = new(RecursiveReadOnlyMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountStatus. +func (in *VolumeMountStatus) DeepCopy() *VolumeMountStatus { + if in == nil { + return nil + } + out := new(VolumeMountStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { *out = *in @@ -5776,6 +6356,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = new(ServiceAccountTokenProjection) (*in).DeepCopyInto(*out) } + if in.ClusterTrustBundle != nil { + in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle + *out = new(ClusterTrustBundleProjection) + (*in).DeepCopyInto(*out) + } return } @@ -5789,6 +6374,36 @@ func (in *VolumeProjection) DeepCopy() *VolumeProjection { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements. +func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements { + if in == nil { + return nil + } + out := new(VolumeResourceRequirements) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = *in @@ -5937,6 +6552,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(EphemeralVolumeSource) (*in).DeepCopyInto(*out) } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageVolumeSource) + **out = **in + } return } diff --git a/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..6710a96d1 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,274 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Binding) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Endpoints) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRange) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *List) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Namespace) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Node) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Pod) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 1 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Secret) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SecretList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Service) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/vendor/k8s.io/api/discovery/v1/doc.go b/vendor/k8s.io/api/discovery/v1/doc.go index 96ae531ce..01913669f 100644 --- a/vendor/k8s.io/api/discovery/v1/doc.go +++ b/vendor/k8s.io/api/discovery/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=discovery.k8s.io package v1 // import "k8s.io/api/discovery/v1" diff --git a/vendor/k8s.io/api/discovery/v1/generated.pb.go b/vendor/k8s.io/api/discovery/v1/generated.pb.go index 38bdb02a5..5792481dc 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1/generated.proto +// source: k8s.io/api/discovery/v1/generated.proto package v1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Endpoint) Reset() { *m = Endpoint{} } func (*Endpoint) ProtoMessage() {} func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{0} + return fileDescriptor_2237b452324cf77e, []int{0} } func (m *Endpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_Endpoint proto.InternalMessageInfo func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } func (*EndpointConditions) ProtoMessage() {} func (*EndpointConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{1} + return fileDescriptor_2237b452324cf77e, []int{1} } func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo func (m *EndpointHints) Reset() { *m = EndpointHints{} } func (*EndpointHints) ProtoMessage() {} func (*EndpointHints) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{2} + return fileDescriptor_2237b452324cf77e, []int{2} } func (m *EndpointHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_EndpointHints proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{3} + return fileDescriptor_2237b452324cf77e, []int{3} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } func (*EndpointSlice) ProtoMessage() {} func (*EndpointSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{4} + return fileDescriptor_2237b452324cf77e, []int{4} } func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } func (*EndpointSliceList) ProtoMessage() {} func (*EndpointSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{5} + return fileDescriptor_2237b452324cf77e, []int{5} } func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-217,7 +217,7 @@ var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo func (m *ForZone) Reset() { *m = ForZone{} } func (*ForZone) ProtoMessage() {} func (*ForZone) Descriptor() ([]byte, []int) { - return fileDescriptor_3a5d310fb1396ddf, []int{6} + return fileDescriptor_2237b452324cf77e, []int{6} } func (m *ForZone) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -254,67 +254,66 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1/generated.proto", fileDescriptor_3a5d310fb1396ddf) -} - -var fileDescriptor_3a5d310fb1396ddf = []byte{ - // 889 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4d, 0x6f, 0xe3, 0x44, - 0x18, 0x8e, 0x9b, 0x86, 0xda, 0x93, 0x56, 0xec, 0x8e, 0x90, 0x36, 0x0a, 0x28, 0x0e, 0x46, 0x8b, - 0x22, 0x55, 0xd8, 0xb4, 0x42, 0x68, 0xe1, 0x44, 0xcd, 0x96, 0x5d, 0xbe, 0x4a, 0x35, 0xdb, 0xd3, - 0x0a, 0x69, 0x71, 0xed, 0xb7, 0x8e, 0x49, 0x33, 0x63, 0xcd, 0x4c, 0x22, 0x85, 0x13, 0x17, 0xce, - 0xf0, 0x8b, 0x38, 0xa2, 0x1e, 0xf7, 0xc6, 0x9e, 0x2c, 0x6a, 0xfe, 0x02, 0xa7, 0x3d, 0xa1, 0x19, - 0x7f, 0x96, 0xb4, 0x0a, 0xb7, 0x99, 0x67, 0x9e, 0xe7, 0xfd, 0x78, 0x66, 0xe6, 0x45, 0x9f, 0xcd, - 0x1e, 0x09, 0x37, 0x61, 0xde, 0x6c, 0x71, 0x0e, 0x9c, 0x82, 0x04, 0xe1, 0x2d, 0x81, 0x46, 0x8c, - 0x7b, 0xe5, 0x41, 0x90, 0x26, 0x5e, 0x94, 0x88, 0x90, 0x2d, 0x81, 0xaf, 0xbc, 0xe5, 0x81, 0x17, - 0x03, 0x05, 0x1e, 0x48, 0x88, 0xdc, 0x94, 0x33, 0xc9, 0xf0, 0x83, 0x82, 0xe8, 0x06, 0x69, 0xe2, - 0xd6, 0x44, 0x77, 0x79, 0x30, 0xfc, 0x20, 0x4e, 0xe4, 0x74, 0x71, 0xee, 0x86, 0x6c, 0xee, 0xc5, - 0x2c, 0x66, 0x9e, 0xe6, 0x9f, 0x2f, 0x2e, 0xf4, 0x4e, 0x6f, 0xf4, 0xaa, 0x88, 0x33, 0x74, 0x5a, - 0x09, 0x43, 0xc6, 0xe1, 0x96, 0x5c, 0xc3, 0x8f, 0x1a, 0xce, 0x3c, 0x08, 0xa7, 0x09, 0x55, 0x35, - 0xa5, 0xb3, 0x58, 0x01, 0xc2, 0x9b, 0x83, 0x0c, 0x6e, 0x53, 0x79, 0x77, 0xa9, 0xf8, 0x82, 0xca, - 0x64, 0x0e, 0x6b, 0x82, 0x8f, 0x37, 0x09, 0x44, 0x38, 0x85, 0x79, 0xf0, 0x5f, 0x9d, 0xf3, 0xcf, - 0x36, 0x32, 0x8f, 0x69, 0x94, 0xb2, 0x84, 0x4a, 0xbc, 0x8f, 0xac, 0x20, 0x8a, 0x38, 0x08, 0x01, - 0x62, 0x60, 0x8c, 0xbb, 0x13, 0xcb, 0xdf, 0xcb, 0x33, 0xdb, 0x3a, 0xaa, 0x40, 0xd2, 0x9c, 0xe3, - 0x17, 0x08, 0x85, 0x8c, 0x46, 0x89, 0x4c, 0x18, 0x15, 0x83, 0xad, 0xb1, 0x31, 0xe9, 0x1f, 0xee, - 0xbb, 0x77, 0x38, 0xeb, 0x56, 0x39, 0x3e, 0xaf, 0x25, 0x3e, 0xbe, 0xca, 0xec, 0x4e, 0x9e, 0xd9, - 0xa8, 0xc1, 0x48, 0x2b, 0x24, 0x9e, 0x20, 0x73, 0xca, 0x84, 0xa4, 0xc1, 0x1c, 0x06, 0xdd, 0xb1, - 0x31, 0xb1, 0xfc, 0xdd, 0x3c, 0xb3, 0xcd, 0xa7, 0x25, 0x46, 0xea, 0x53, 0x7c, 0x8a, 0x2c, 0x19, - 0xf0, 0x18, 0x24, 0x81, 0x8b, 0xc1, 0xb6, 0xae, 0xe4, 0xbd, 0x76, 0x25, 0xea, 0x6e, 0x54, 0x11, - 0xdf, 0x9d, 0xff, 0x08, 0xa1, 0x22, 0x01, 0x07, 0x1a, 0x42, 0xd1, 0xdc, 0x59, 0xa5, 0x24, 0x4d, - 0x10, 0xfc, 0x8b, 0x81, 0x70, 0x04, 0x29, 0x87, 0x50, 0x79, 0x75, 0xc6, 0x52, 0x76, 0xc9, 0xe2, - 0xd5, 0xa0, 0x37, 0xee, 0x4e, 0xfa, 0x87, 0x9f, 0x6c, 0xec, 0xd2, 0x7d, 0xbc, 0xa6, 0x3d, 0xa6, - 0x92, 0xaf, 0xfc, 0x61, 0xd9, 0x33, 0x5e, 0x27, 0x90, 0x5b, 0x12, 0x2a, 0x0f, 0x28, 0x8b, 0xe0, - 0x44, 0x79, 0xf0, 0x46, 0xe3, 0xc1, 0x49, 0x89, 0x91, 0xfa, 0x14, 0xbf, 0x83, 0xb6, 0x7f, 0x62, - 0x14, 0x06, 0x3b, 0x9a, 0x65, 0xe6, 0x99, 0xbd, 0xfd, 0x9c, 0x51, 0x20, 0x1a, 0xc5, 0x4f, 0x50, - 0x6f, 0x9a, 0x50, 0x29, 0x06, 0xa6, 0x76, 0xe7, 0xfd, 0x8d, 0x1d, 0x3c, 0x55, 0x6c, 0xdf, 0xca, - 0x33, 0xbb, 0xa7, 0x97, 0xa4, 0xd0, 0x0f, 0x8f, 0xd1, 0x83, 0x3b, 0x7a, 0xc3, 0xf7, 0x50, 0x77, - 0x06, 0xab, 0x81, 0xa1, 0x0a, 0x20, 0x6a, 0x89, 0xdf, 
0x42, 0xbd, 0x65, 0x70, 0xb9, 0x00, 0xfd, - 0x3a, 0x2c, 0x52, 0x6c, 0x3e, 0xdd, 0x7a, 0x64, 0x38, 0xbf, 0x1a, 0x08, 0xaf, 0x3f, 0x09, 0x6c, - 0xa3, 0x1e, 0x87, 0x20, 0x2a, 0x82, 0x98, 0x45, 0x7a, 0xa2, 0x00, 0x52, 0xe0, 0xf8, 0x21, 0xda, - 0x11, 0xc0, 0x97, 0x09, 0x8d, 0x75, 0x4c, 0xd3, 0xef, 0xe7, 0x99, 0xbd, 0xf3, 0xac, 0x80, 0x48, - 0x75, 0x86, 0x0f, 0x50, 0x5f, 0x02, 0x9f, 0x27, 0x34, 0x90, 0x8a, 0xda, 0xd5, 0xd4, 0x37, 0xf3, - 0xcc, 0xee, 0x9f, 0x35, 0x30, 0x69, 0x73, 0x9c, 0x17, 0x68, 0xef, 0x46, 0xef, 0xf8, 0x04, 0x99, - 0x17, 0x8c, 0x2b, 0x0f, 0x8b, 0xbf, 0xd0, 0x3f, 0x1c, 0xdf, 0xe9, 0xda, 0x17, 0x05, 0xd1, 0xbf, - 0x57, 0x5e, 0xaf, 0x59, 0x02, 0x82, 0xd4, 0x31, 0x9c, 0x3f, 0x0c, 0xb4, 0x5b, 0x65, 0x38, 0x65, - 0x5c, 0xaa, 0x1b, 0xd3, 0x6f, 0xdb, 0x68, 0x6e, 0x4c, 0xdf, 0xa9, 0x46, 0xf1, 0x13, 0x64, 0xea, - 0x1f, 0x1a, 0xb2, 0xcb, 0xc2, 0x3e, 0x7f, 0x5f, 0x05, 0x3e, 0x2d, 0xb1, 0xd7, 0x99, 0xfd, 0xf6, - 0xfa, 0xf4, 0x71, 0xab, 0x63, 0x52, 0x8b, 0x55, 0x9a, 0x94, 0x71, 0xa9, 0x4d, 0xe8, 0x15, 0x69, - 0x54, 0x7a, 0xa2, 0x51, 0xe5, 0x54, 0x90, 0xa6, 0x95, 0x4c, 0x7f, 0x1e, 0xab, 0x70, 0xea, 0xa8, - 0x81, 0x49, 0x9b, 0xe3, 0xfc, 0xb9, 0xd5, 0x58, 0xf5, 0xec, 0x32, 0x09, 0x01, 0xff, 0x80, 0x4c, - 0x35, 0xc8, 0xa2, 0x40, 0x06, 0xba, 0x9b, 0xfe, 0xe1, 0x87, 0x2d, 0xab, 0xea, 0x79, 0xe4, 0xa6, - 0xb3, 0x58, 0x01, 0xc2, 0x55, 0xec, 0xe6, 0x43, 0x7e, 0x0b, 0x32, 0x68, 0xa6, 0x41, 0x83, 0x91, - 0x3a, 0x2a, 0x7e, 0x8c, 0xfa, 0xe5, 0xe4, 0x39, 0x5b, 0xa5, 0x50, 0x96, 0xe9, 0x94, 0x92, 0xfe, - 0x51, 0x73, 0xf4, 0xfa, 0xe6, 0x96, 0xb4, 0x65, 0x98, 0x20, 0x0b, 0xca, 0xc2, 0xd5, 0xc4, 0x52, - 0x77, 0xfa, 0xee, 0xc6, 0x9f, 0xe0, 0xdf, 0x2f, 0xd3, 0x58, 0x15, 0x22, 0x48, 0x13, 0x06, 0x7f, - 0x85, 0x7a, 0xca, 0x48, 0x31, 0xe8, 0xea, 0x78, 0x0f, 0x37, 0xc6, 0x53, 0xe6, 0xfb, 0x7b, 0x65, - 0xcc, 0x9e, 0xda, 0x09, 0x52, 0x84, 0x70, 0x7e, 0x37, 0xd0, 0xfd, 0x1b, 0xce, 0x7e, 0x93, 0x08, - 0x89, 0xbf, 0x5f, 0x73, 0xd7, 0xfd, 0x7f, 0xee, 0x2a, 0xb5, 0xf6, 0xb6, 0x7e, 0x96, 0x15, 0xd2, - 0x72, 0xf6, 0x6b, 0xd4, 0x4b, 0x24, 0xcc, 0x2b, 0x3f, 0x36, 0x4f, 0x06, 0x5d, 0x58, 0xd3, 0xc0, - 0x97, 0x4a, 0x4c, 0x8a, 0x18, 0xce, 0x3e, 0xda, 0x29, 0x5f, 0x3e, 0x1e, 0xdf, 0x78, 0xdd, 0xbb, - 0x25, 0xbd, 0xf5, 0xc2, 0xfd, 0xc9, 0xd5, 0xf5, 0xa8, 0xf3, 0xf2, 0x7a, 0xd4, 0x79, 0x75, 0x3d, - 0xea, 0xfc, 0x9c, 0x8f, 0x8c, 0xab, 0x7c, 0x64, 0xbc, 0xcc, 0x47, 0xc6, 0xab, 0x7c, 0x64, 0xfc, - 0x95, 0x8f, 0x8c, 0xdf, 0xfe, 0x1e, 0x75, 0x9e, 0x6f, 0x2d, 0x0f, 0xfe, 0x0d, 0x00, 0x00, 0xff, - 0xff, 0x66, 0x0f, 0x26, 0x7b, 0xf2, 0x07, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/discovery/v1/generated.proto", fileDescriptor_2237b452324cf77e) +} + +var fileDescriptor_2237b452324cf77e = []byte{ + // 877 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4d, 0x6f, 0xdc, 0x44, + 0x18, 0x5e, 0x67, 0x63, 0x62, 0x8f, 0x13, 0xd1, 0x8e, 0x90, 0x62, 0x2d, 0xc8, 0x5e, 0x8c, 0x0a, + 0x2b, 0x45, 0x78, 0x49, 0x84, 0x50, 0x41, 0xe2, 0x10, 0xd3, 0xd0, 0xf2, 0x15, 0xa2, 0x69, 0x4e, + 0x15, 0x52, 0x71, 0xec, 0x37, 0x5e, 0x93, 0xd8, 0x63, 0x79, 0x26, 0x2b, 0x2d, 0x27, 0x2e, 0x9c, + 0xe1, 0x17, 0x71, 0x44, 0x39, 0xf6, 0x46, 0x4f, 0x16, 0x31, 0x7f, 0x81, 0x53, 0x4f, 0x68, 0xc6, + 0x9f, 0x61, 0xb3, 0xda, 0xde, 0x3c, 0xcf, 0x3c, 0xcf, 0xfb, 0xf1, 0xcc, 0xcc, 0x6b, 0xf4, 0xc1, + 0xc5, 0x43, 0xe6, 0xc6, 0x74, 0xea, 0x67, 0xf1, 0x34, 0x8c, 0x59, 0x40, 0xe7, 0x90, 0x2f, 0xa6, + 0xf3, 0xfd, 0x69, 0x04, 0x29, 0xe4, 0x3e, 0x87, 0xd0, 0xcd, 0x72, 0xca, 0x29, 0xde, 0xad, 0x88, + 0xae, 0x9f, 0xc5, 0x6e, 
0x4b, 0x74, 0xe7, 0xfb, 0xa3, 0x0f, 0xa3, 0x98, 0xcf, 0xae, 0xce, 0xdc, + 0x80, 0x26, 0xd3, 0x88, 0x46, 0x74, 0x2a, 0xf9, 0x67, 0x57, 0xe7, 0x72, 0x25, 0x17, 0xf2, 0xab, + 0x8a, 0x33, 0x72, 0x7a, 0x09, 0x03, 0x9a, 0xc3, 0x1d, 0xb9, 0x46, 0x1f, 0x77, 0x9c, 0xc4, 0x0f, + 0x66, 0x71, 0x2a, 0x6a, 0xca, 0x2e, 0x22, 0x01, 0xb0, 0x69, 0x02, 0xdc, 0xbf, 0x4b, 0x35, 0x5d, + 0xa5, 0xca, 0xaf, 0x52, 0x1e, 0x27, 0xb0, 0x24, 0xf8, 0x64, 0x9d, 0x80, 0x05, 0x33, 0x48, 0xfc, + 0xff, 0xeb, 0x9c, 0x7f, 0x37, 0x91, 0x76, 0x94, 0x86, 0x19, 0x8d, 0x53, 0x8e, 0xf7, 0x90, 0xee, + 0x87, 0x61, 0x0e, 0x8c, 0x01, 0x33, 0x95, 0xf1, 0x70, 0xa2, 0x7b, 0x3b, 0x65, 0x61, 0xeb, 0x87, + 0x0d, 0x48, 0xba, 0x7d, 0xfc, 0x1c, 0xa1, 0x80, 0xa6, 0x61, 0xcc, 0x63, 0x9a, 0x32, 0x73, 0x63, + 0xac, 0x4c, 0x8c, 0x83, 0x3d, 0x77, 0x85, 0xb3, 0x6e, 0x93, 0xe3, 0x8b, 0x56, 0xe2, 0xe1, 0xeb, + 0xc2, 0x1e, 0x94, 0x85, 0x8d, 0x3a, 0x8c, 0xf4, 0x42, 0xe2, 0x09, 0xd2, 0x66, 0x94, 0xf1, 0xd4, + 0x4f, 0xc0, 0x1c, 0x8e, 0x95, 0x89, 0xee, 0x6d, 0x97, 0x85, 0xad, 0x3d, 0xa9, 0x31, 0xd2, 0xee, + 0xe2, 0x13, 0xa4, 0x73, 0x3f, 0x8f, 0x80, 0x13, 0x38, 0x37, 0x37, 0x65, 0x25, 0xef, 0xf5, 0x2b, + 0x11, 0x67, 0x23, 0x8a, 0xf8, 0xfe, 0xec, 0x27, 0x08, 0x04, 0x09, 0x72, 0x48, 0x03, 0xa8, 0x9a, + 0x3b, 0x6d, 0x94, 0xa4, 0x0b, 0x82, 0x7f, 0x55, 0x10, 0x0e, 0x21, 0xcb, 0x21, 0x10, 0x5e, 0x9d, + 0xd2, 0x8c, 0x5e, 0xd2, 0x68, 0x61, 0xaa, 0xe3, 0xe1, 0xc4, 0x38, 0xf8, 0x74, 0x6d, 0x97, 0xee, + 0xa3, 0x25, 0xed, 0x51, 0xca, 0xf3, 0x85, 0x37, 0xaa, 0x7b, 0xc6, 0xcb, 0x04, 0x72, 0x47, 0x42, + 0xe1, 0x41, 0x4a, 0x43, 0x38, 0x16, 0x1e, 0xbc, 0xd1, 0x79, 0x70, 0x5c, 0x63, 0xa4, 0xdd, 0xc5, + 0xef, 0xa0, 0xcd, 0x9f, 0x69, 0x0a, 0xe6, 0x96, 0x64, 0x69, 0x65, 0x61, 0x6f, 0x3e, 0xa3, 0x29, + 0x10, 0x89, 0xe2, 0xc7, 0x48, 0x9d, 0xc5, 0x29, 0x67, 0xa6, 0x26, 0xdd, 0x79, 0x7f, 0x6d, 0x07, + 0x4f, 0x04, 0xdb, 0xd3, 0xcb, 0xc2, 0x56, 0xe5, 0x27, 0xa9, 0xf4, 0xa3, 0x23, 0xb4, 0xbb, 0xa2, + 0x37, 0x7c, 0x0f, 0x0d, 0x2f, 0x60, 0x61, 0x2a, 0xa2, 0x00, 0x22, 0x3e, 0xf1, 0x5b, 0x48, 0x9d, + 0xfb, 0x97, 0x57, 0x20, 0x6f, 0x87, 0x4e, 0xaa, 0xc5, 0x67, 0x1b, 0x0f, 0x15, 0xe7, 0x37, 0x05, + 0xe1, 0xe5, 0x2b, 0x81, 0x6d, 0xa4, 0xe6, 0xe0, 0x87, 0x55, 0x10, 0xad, 0x4a, 0x4f, 0x04, 0x40, + 0x2a, 0x1c, 0x3f, 0x40, 0x5b, 0x0c, 0xf2, 0x79, 0x9c, 0x46, 0x32, 0xa6, 0xe6, 0x19, 0x65, 0x61, + 0x6f, 0x3d, 0xad, 0x20, 0xd2, 0xec, 0xe1, 0x7d, 0x64, 0x70, 0xc8, 0x93, 0x38, 0xf5, 0xb9, 0xa0, + 0x0e, 0x25, 0xf5, 0xcd, 0xb2, 0xb0, 0x8d, 0xd3, 0x0e, 0x26, 0x7d, 0x8e, 0xf3, 0x1c, 0xed, 0xdc, + 0xea, 0x1d, 0x1f, 0x23, 0xed, 0x9c, 0xe6, 0xc2, 0xc3, 0xea, 0x2d, 0x18, 0x07, 0xe3, 0x95, 0xae, + 0x7d, 0x59, 0x11, 0xbd, 0x7b, 0xf5, 0xf1, 0x6a, 0x35, 0xc0, 0x48, 0x1b, 0xc3, 0xf9, 0x53, 0x41, + 0xdb, 0x4d, 0x86, 0x13, 0x9a, 0x73, 0x71, 0x62, 0xf2, 0x6e, 0x2b, 0xdd, 0x89, 0xc9, 0x33, 0x95, + 0x28, 0x7e, 0x8c, 0x34, 0xf9, 0x42, 0x03, 0x7a, 0x59, 0xd9, 0xe7, 0xed, 0x89, 0xc0, 0x27, 0x35, + 0xf6, 0xaa, 0xb0, 0xdf, 0x5e, 0x9e, 0x3e, 0x6e, 0xb3, 0x4d, 0x5a, 0xb1, 0x48, 0x93, 0xd1, 0x9c, + 0x4b, 0x13, 0xd4, 0x2a, 0x8d, 0x48, 0x4f, 0x24, 0x2a, 0x9c, 0xf2, 0xb3, 0xac, 0x91, 0xc9, 0xc7, + 0xa3, 0x57, 0x4e, 0x1d, 0x76, 0x30, 0xe9, 0x73, 0x9c, 0xbf, 0x36, 0x3a, 0xab, 0x9e, 0x5e, 0xc6, + 0x01, 0xe0, 0x1f, 0x91, 0x26, 0x06, 0x59, 0xe8, 0x73, 0x5f, 0x76, 0x63, 0x1c, 0x7c, 0xd4, 0xb3, + 0xaa, 0x9d, 0x47, 0x6e, 0x76, 0x11, 0x09, 0x80, 0xb9, 0x82, 0xdd, 0x3d, 0xc8, 0xef, 0x80, 0xfb, + 0xdd, 0x34, 0xe8, 0x30, 0xd2, 0x46, 0xc5, 0x8f, 0x90, 0x51, 0x4f, 0x9e, 0xd3, 0x45, 0x06, 0x75, + 0x99, 0x4e, 0x2d, 0x31, 0x0e, 0xbb, 0xad, 0x57, 
0xb7, 0x97, 0xa4, 0x2f, 0xc3, 0x04, 0xe9, 0x50, + 0x17, 0x2e, 0x26, 0x96, 0x38, 0xd3, 0x77, 0xd7, 0xbe, 0x04, 0xef, 0x7e, 0x9d, 0x46, 0x6f, 0x10, + 0x46, 0xba, 0x30, 0xf8, 0x6b, 0xa4, 0x0a, 0x23, 0x99, 0x39, 0x94, 0xf1, 0x1e, 0xac, 0x8d, 0x27, + 0xcc, 0xf7, 0x76, 0xea, 0x98, 0xaa, 0x58, 0x31, 0x52, 0x85, 0x70, 0xfe, 0x50, 0xd0, 0xfd, 0x5b, + 0xce, 0x7e, 0x1b, 0x33, 0x8e, 0x7f, 0x58, 0x72, 0xd7, 0x7d, 0x3d, 0x77, 0x85, 0x5a, 0x7a, 0xdb, + 0x5e, 0xcb, 0x06, 0xe9, 0x39, 0xfb, 0x0d, 0x52, 0x63, 0x0e, 0x49, 0xe3, 0xc7, 0xfa, 0xc9, 0x20, + 0x0b, 0xeb, 0x1a, 0xf8, 0x4a, 0x88, 0x49, 0x15, 0xc3, 0xd9, 0x43, 0x5b, 0xf5, 0xcd, 0xc7, 0xe3, + 0x5b, 0xb7, 0x7b, 0xbb, 0xa6, 0xf7, 0x6e, 0xb8, 0xf7, 0xf9, 0xf5, 0x8d, 0x35, 0x78, 0x71, 0x63, + 0x0d, 0x5e, 0xde, 0x58, 0x83, 0x5f, 0x4a, 0x4b, 0xb9, 0x2e, 0x2d, 0xe5, 0x45, 0x69, 0x29, 0x2f, + 0x4b, 0x4b, 0xf9, 0xbb, 0xb4, 0x94, 0xdf, 0xff, 0xb1, 0x06, 0xcf, 0x76, 0x57, 0xfc, 0xd4, 0xff, + 0x0b, 0x00, 0x00, 0xff, 0xff, 0x76, 0x4b, 0x26, 0xe3, 0xee, 0x07, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 0e1e4a8c3..8ddf0dc5d 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/discovery/v1"; // Endpoint represents a single logical "backend" implementing a service. message Endpoint { @@ -35,7 +35,8 @@ message Endpoint { // according to the corresponding EndpointSlice addressType field. Consumers // must handle different types of addresses in the context of their own // capabilities. This must contain at least one address but no more than - // 100. + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set repeated string addresses = 1; @@ -53,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // deprecatedTopology contains topology information part of the v1beta1 // API. This field is deprecated, and will be removed when the v1beta1 @@ -65,8 +66,7 @@ message Endpoint { map deprecatedTopology = 5; // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional optional string nodeName = 6; @@ -86,22 +86,22 @@ message EndpointConditions { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. 
// +optional optional bool ready = 1; // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional optional bool serving = 2; // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional optional bool terminating = 3; } @@ -117,9 +117,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -127,21 +126,30 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional optional string appProtocol = 4; @@ -153,7 +161,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -183,9 +191,9 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 3c45ba3e7..d6a9d0fce 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -23,15 +23,18 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSlice represents a subset of the endpoints that implement a service. // For a given service there may be multiple EndpointSlice objects, selected by // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -40,10 +43,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -61,8 +66,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -73,11 +80,14 @@ type Endpoint struct { // according to the corresponding EndpointSlice addressType field. Consumers // must handle different types of addresses in the context of their own // capabilities. This must contain at least one address but no more than - // 100. + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). 
// Multiple endpoints which use the same hostname should be considered @@ -85,6 +95,7 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional @@ -100,13 +111,14 @@ type Endpoint struct { DeprecatedTopology map[string]string `json:"deprecatedTopology,omitempty" protobuf:"bytes,5,opt,name=deprecatedTopology"` // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // zone is the name of the Zone this endpoint exists in. // +optional Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"` + // hints contains information associated with how an endpoint should be // consumed. // +optional @@ -119,22 +131,22 @@ type EndpointConditions struct { // according to whatever system is managing the endpoint. A nil value // indicates an unknown state. In most cases consumers should interpret this // unknown state as ready. For compatibility reasons, ready should never be - // "true" for terminating endpoints. + // "true" for terminating endpoints, except when the normal readiness + // behavior is being explicitly overridden, for example when the associated + // Service has set the publishNotReadyAddresses flag. // +optional Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"` // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } @@ -156,41 +168,55 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice // +structType=atomic type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. 
+ + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as // mycompany.com/my-custom-protocol. // +optional AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index b424a1cf0..41c306056 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -24,17 +24,17 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", - "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. 
Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267", "conditions": "conditions contains information about the current status of the endpoint.", "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", "deprecatedTopology": "deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead.", - "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node.", "zone": "zone is the name of the Zone this endpoint exists in.", "hints": "hints contains information associated with how an endpoint should be consumed.", } @@ -45,9 +45,9 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", - "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", - "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", - "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. 
If nil, consumers should defer to the ready condition.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", } func (EndpointConditions) SwaggerDoc() map[string]string { @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -90,7 +90,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..362867c5b --- /dev/null +++ b/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go index e024cc0a1..46935574b 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto +// source: k8s.io/api/discovery/v1beta1/generated.proto package v1beta1 @@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Endpoint) Reset() { *m = Endpoint{} } func (*Endpoint) ProtoMessage() {} func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{0} + return fileDescriptor_6555bad15de200e0, []int{0} } func (m *Endpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +77,7 @@ var xxx_messageInfo_Endpoint proto.InternalMessageInfo func (m *EndpointConditions) Reset() { *m = EndpointConditions{} } func (*EndpointConditions) ProtoMessage() {} func (*EndpointConditions) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{1} + return fileDescriptor_6555bad15de200e0, []int{1} } func (m *EndpointConditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +105,7 @@ var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo func (m *EndpointHints) Reset() { *m = EndpointHints{} } func (*EndpointHints) ProtoMessage() {} func (*EndpointHints) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{2} + return fileDescriptor_6555bad15de200e0, []int{2} } func (m *EndpointHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +133,7 @@ var xxx_messageInfo_EndpointHints proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{3} + return fileDescriptor_6555bad15de200e0, []int{3} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +161,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSlice) Reset() { *m = EndpointSlice{} } func (*EndpointSlice) ProtoMessage() {} func (*EndpointSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{4} + return fileDescriptor_6555bad15de200e0, []int{4} } func (m *EndpointSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +189,7 @@ var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} } func (*EndpointSliceList) ProtoMessage() {} func (*EndpointSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{5} + return fileDescriptor_6555bad15de200e0, []int{5} } func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +217,7 @@ var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo func (m *ForZone) Reset() { *m = ForZone{} } func (*ForZone) ProtoMessage() {} func (*ForZone) Descriptor() ([]byte, []int) { - return fileDescriptor_ece80bbc872d519b, []int{6} + return fileDescriptor_6555bad15de200e0, []int{6} } func (m *ForZone) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -254,66 +254,65 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/discovery/v1beta1/generated.proto", fileDescriptor_ece80bbc872d519b) -} - -var fileDescriptor_ece80bbc872d519b = []byte{ - // 870 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x41, 0x8f, 0xe3, 0x34, - 0x14, 0x6e, 0xa6, 0x53, 0x9a, 0xb8, 0x33, 0x62, 0xd7, 0xe2, 0x50, 0x0d, 0xab, 0xa4, 0x0a, 0x5a, - 0x54, 0x31, 0xda, 0x84, 0x19, 0xad, 
0xd0, 0x0a, 0x4e, 0x13, 0x18, 0x58, 0xa4, 0x65, 0x77, 0xe4, - 0x19, 0x09, 0x69, 0xc5, 0x01, 0x37, 0xf1, 0xa4, 0xa1, 0x53, 0x3b, 0xb2, 0xdd, 0x4a, 0xbd, 0xf1, - 0x0f, 0xe0, 0xb7, 0xf0, 0x17, 0x90, 0xd0, 0x1c, 0xf7, 0xb8, 0xa7, 0x88, 0x09, 0xff, 0x62, 0x4f, - 0xc8, 0x8e, 0x93, 0xb4, 0x14, 0x86, 0xde, 0xec, 0xcf, 0xef, 0xfb, 0xde, 0x7b, 0xdf, 0xb3, 0x0d, - 0xce, 0x67, 0xcf, 0x44, 0x90, 0xb1, 0x70, 0xb6, 0x98, 0x10, 0x4e, 0x89, 0x24, 0x22, 0x5c, 0x12, - 0x9a, 0x30, 0x1e, 0x9a, 0x03, 0x9c, 0x67, 0x61, 0x92, 0x89, 0x98, 0x2d, 0x09, 0x5f, 0x85, 0xcb, - 0x93, 0x09, 0x91, 0xf8, 0x24, 0x4c, 0x09, 0x25, 0x1c, 0x4b, 0x92, 0x04, 0x39, 0x67, 0x92, 0xc1, - 0x47, 0x55, 0x74, 0x80, 0xf3, 0x2c, 0x68, 0xa2, 0x03, 0x13, 0x7d, 0xf4, 0x24, 0xcd, 0xe4, 0x74, - 0x31, 0x09, 0x62, 0x36, 0x0f, 0x53, 0x96, 0xb2, 0x50, 0x93, 0x26, 0x8b, 0x6b, 0xbd, 0xd3, 0x1b, - 0xbd, 0xaa, 0xc4, 0x8e, 0xfc, 0xb5, 0xd4, 0x31, 0xe3, 0x24, 0x5c, 0x6e, 0x25, 0x3c, 0x7a, 0xda, - 0xc6, 0xcc, 0x71, 0x3c, 0xcd, 0xa8, 0xaa, 0x2e, 0x9f, 0xa5, 0x0a, 0x10, 0xe1, 0x9c, 0x48, 0xfc, - 0x6f, 0xac, 0xf0, 0xbf, 0x58, 0x7c, 0x41, 0x65, 0x36, 0x27, 0x5b, 0x84, 0xcf, 0xfe, 0x8f, 0x20, - 0xe2, 0x29, 0x99, 0xe3, 0x7f, 0xf2, 0xfc, 0xdf, 0xf6, 0x81, 0x7d, 0x4e, 0x93, 0x9c, 0x65, 0x54, - 0xc2, 0x63, 0xe0, 0xe0, 0x24, 0xe1, 0x44, 0x08, 0x22, 0x86, 0xd6, 0xa8, 0x3b, 0x76, 0xa2, 0xc3, - 0xb2, 0xf0, 0x9c, 0xb3, 0x1a, 0x44, 0xed, 0x39, 0x4c, 0x00, 0x88, 0x19, 0x4d, 0x32, 0x99, 0x31, - 0x2a, 0x86, 0x7b, 0x23, 0x6b, 0x3c, 0x38, 0xfd, 0x34, 0xb8, 0xcf, 0xde, 0xa0, 0x4e, 0xf4, 0x65, - 0xc3, 0x8b, 0xe0, 0x6d, 0xe1, 0x75, 0xca, 0xc2, 0x03, 0x2d, 0x86, 0xd6, 0x74, 0xe1, 0x18, 0xd8, - 0x53, 0x26, 0x24, 0xc5, 0x73, 0x32, 0xec, 0x8e, 0xac, 0xb1, 0x13, 0x1d, 0x94, 0x85, 0x67, 0x3f, - 0x37, 0x18, 0x6a, 0x4e, 0xe1, 0x05, 0x70, 0x24, 0xe6, 0x29, 0x91, 0x88, 0x5c, 0x0f, 0xf7, 0x75, - 0x39, 0x1f, 0xad, 0x97, 0xa3, 0x06, 0x14, 0x2c, 0x4f, 0x82, 0x57, 0x93, 0x9f, 0x48, 0xac, 0x82, - 0x08, 0x27, 0x34, 0x26, 0x55, 0x87, 0x57, 0x35, 0x13, 0xb5, 0x22, 0x70, 0x02, 0x6c, 0xc9, 0x72, - 0x76, 0xc3, 0xd2, 0xd5, 0xb0, 0x37, 0xea, 0x8e, 0x07, 0xa7, 0x4f, 0x77, 0xeb, 0x2f, 0xb8, 0x32, - 0xb4, 0x73, 0x2a, 0xf9, 0x2a, 0x7a, 0x60, 0x7a, 0xb4, 0x6b, 0x18, 0x35, 0xba, 0xaa, 0x3f, 0xca, - 0x12, 0xf2, 0x52, 0xf5, 0xf7, 0x5e, 0xdb, 0xdf, 0x4b, 0x83, 0xa1, 0xe6, 0x14, 0xbe, 0x00, 0xbd, - 0x69, 0x46, 0xa5, 0x18, 0xf6, 0x75, 0x6f, 0xc7, 0xbb, 0x95, 0xf2, 0x5c, 0x51, 0x22, 0xa7, 0x2c, - 0xbc, 0x9e, 0x5e, 0xa2, 0x4a, 0xe4, 0xe8, 0x0b, 0x70, 0xb8, 0x51, 0x24, 0x7c, 0x00, 0xba, 0x33, - 0xb2, 0x1a, 0x5a, 0xaa, 0x06, 0xa4, 0x96, 0xf0, 0x03, 0xd0, 0x5b, 0xe2, 0x9b, 0x05, 0xd1, 0xb3, - 0x75, 0x50, 0xb5, 0xf9, 0x7c, 0xef, 0x99, 0xe5, 0xff, 0x62, 0x01, 0xb8, 0x3d, 0x4b, 0xe8, 0x81, - 0x1e, 0x27, 0x38, 0xa9, 0x44, 0xec, 0x2a, 0x29, 0x52, 0x00, 0xaa, 0x70, 0xf8, 0x18, 0xf4, 0x05, - 0xe1, 0xcb, 0x8c, 0xa6, 0x5a, 0xd3, 0x8e, 0x06, 0x65, 0xe1, 0xf5, 0x2f, 0x2b, 0x08, 0xd5, 0x67, - 0xf0, 0x04, 0x0c, 0x24, 0xe1, 0xf3, 0x8c, 0x62, 0xa9, 0x42, 0xbb, 0x3a, 0xf4, 0xfd, 0xb2, 0xf0, - 0x06, 0x57, 0x2d, 0x8c, 0xd6, 0x63, 0xfc, 0x04, 0x1c, 0x6e, 0x74, 0x0c, 0x2f, 0x81, 0x7d, 0xcd, - 0xf8, 0x6b, 0x46, 0xcd, 0x4d, 0x1e, 0x9c, 0x3e, 0xbe, 0xdf, 0xb0, 0xaf, 0xab, 0xe8, 0x76, 0x58, - 0x06, 0x10, 0xa8, 0x11, 0xf2, 0xff, 0xb0, 0xc0, 0x41, 0x9d, 0xe6, 0x82, 0x71, 0x09, 0x1f, 0x81, - 0x7d, 0x7d, 0x33, 0xb5, 0x6b, 0x91, 0x5d, 0x16, 0xde, 0xbe, 0x9e, 0x9a, 0x46, 0xe1, 0x37, 0xc0, - 0xd6, 0x8f, 0x2c, 0x66, 0x37, 0x95, 0x87, 0xd1, 0xb1, 0x12, 0xbe, 0x30, 0xd8, 0xbb, 0xc2, 0xfb, - 0x70, 0xfb, 0x03, 0x09, 0xea, 0x63, 0xd4, 0x90, 0x55, 0x9a, 
0x9c, 0x71, 0xa9, 0x9d, 0xe8, 0x55, - 0x69, 0x54, 0x7a, 0xa4, 0x51, 0x65, 0x17, 0xce, 0xf3, 0x9a, 0xa6, 0xaf, 0xbe, 0x53, 0xd9, 0x75, - 0xd6, 0xc2, 0x68, 0x3d, 0xc6, 0xbf, 0xdb, 0x6b, 0xfd, 0xba, 0xbc, 0xc9, 0x62, 0x02, 0x7f, 0x04, - 0xb6, 0xfa, 0x8b, 0x12, 0x2c, 0xb1, 0xee, 0x66, 0xf3, 0x2d, 0x37, 0x5f, 0x4a, 0x90, 0xcf, 0x52, - 0x05, 0x88, 0x40, 0x45, 0xb7, 0xcf, 0xe9, 0x3b, 0x22, 0x71, 0xfb, 0x96, 0x5b, 0x0c, 0x35, 0xaa, - 0xf0, 0x2b, 0x30, 0x30, 0x9f, 0xc7, 0xd5, 0x2a, 0x27, 0xa6, 0x4c, 0xdf, 0x50, 0x06, 0x67, 0xed, - 0xd1, 0xbb, 0xcd, 0x2d, 0x5a, 0xa7, 0xc1, 0xef, 0x81, 0x43, 0x4c, 0xe1, 0xea, 0xd3, 0x51, 0x83, - 0xfd, 0x78, 0xb7, 0x97, 0x10, 0x3d, 0x34, 0xb9, 0x9c, 0x1a, 0x11, 0xa8, 0xd5, 0x82, 0xaf, 0x40, - 0x4f, 0xb9, 0x29, 0x86, 0x5d, 0x2d, 0xfa, 0xc9, 0x6e, 0xa2, 0x6a, 0x0c, 0xd1, 0xa1, 0x11, 0xee, - 0xa9, 0x9d, 0x40, 0x95, 0x8e, 0xff, 0xbb, 0x05, 0x1e, 0x6e, 0x78, 0xfc, 0x22, 0x13, 0x12, 0xfe, - 0xb0, 0xe5, 0x73, 0xb0, 0x9b, 0xcf, 0x8a, 0xad, 0x5d, 0x6e, 0x2e, 0x68, 0x8d, 0xac, 0x79, 0x7c, - 0x01, 0x7a, 0x99, 0x24, 0xf3, 0xda, 0x99, 0x1d, 0xff, 0x08, 0x5d, 0x5d, 0xdb, 0xc5, 0xb7, 0x4a, - 0x01, 0x55, 0x42, 0xfe, 0x31, 0xe8, 0x9b, 0x87, 0x00, 0x47, 0x1b, 0x97, 0xfd, 0xc0, 0x84, 0xaf, - 0x5d, 0xf8, 0xe8, 0xc9, 0xed, 0x9d, 0xdb, 0x79, 0x73, 0xe7, 0x76, 0xde, 0xde, 0xb9, 0x9d, 0x9f, - 0x4b, 0xd7, 0xba, 0x2d, 0x5d, 0xeb, 0x4d, 0xe9, 0x5a, 0x6f, 0x4b, 0xd7, 0xfa, 0xb3, 0x74, 0xad, - 0x5f, 0xff, 0x72, 0x3b, 0xaf, 0xfb, 0x26, 0xff, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x0d, - 0x6f, 0x98, 0xd3, 0x07, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/discovery/v1beta1/generated.proto", fileDescriptor_6555bad15de200e0) +} + +var fileDescriptor_6555bad15de200e0 = []byte{ + // 857 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0xe4, 0x34, + 0x14, 0x9f, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d, + 0x1a, 0x51, 0x48, 0x68, 0xb5, 0x42, 0x2b, 0x38, 0x35, 0xb0, 0xb0, 0x48, 0xcb, 0x6e, 0xe5, 0x56, + 0x42, 0x5a, 0x71, 0xc0, 0x93, 0xb8, 0x19, 0xd3, 0x26, 0x8e, 0x62, 0x77, 0xa4, 0xb9, 0xf1, 0x0d, + 0xe0, 0xb3, 0xf0, 0x15, 0x90, 0x50, 0x8f, 0x7b, 0xdc, 0x53, 0xc4, 0x84, 0x6f, 0xb1, 0x27, 0x64, + 0xc7, 0xf9, 0x33, 0x0c, 0x94, 0xb9, 0xc5, 0x3f, 0xbf, 0xdf, 0xef, 0xbd, 0xf7, 0x7b, 0xb6, 0x03, + 0x3e, 0xbe, 0x7e, 0xc2, 0x7d, 0xca, 0x02, 0x9c, 0xd3, 0x20, 0xa6, 0x3c, 0x62, 0x0b, 0x52, 0x2c, + 0x83, 0xc5, 0xc9, 0x8c, 0x08, 0x7c, 0x12, 0x24, 0x24, 0x23, 0x05, 0x16, 0x24, 0xf6, 0xf3, 0x82, + 0x09, 0x06, 0x8f, 0xea, 0x68, 0x1f, 0xe7, 0xd4, 0x6f, 0xa3, 0x7d, 0x1d, 0x7d, 0xf8, 0x49, 0x42, + 0xc5, 0xfc, 0x76, 0xe6, 0x47, 0x2c, 0x0d, 0x12, 0x96, 0xb0, 0x40, 0x91, 0x66, 0xb7, 0x57, 0x6a, + 0xa5, 0x16, 0xea, 0xab, 0x16, 0x3b, 0xf4, 0x7a, 0xa9, 0x23, 0x56, 0x90, 0x60, 0xb1, 0x91, 0xf0, + 0xf0, 0x71, 0x17, 0x93, 0xe2, 0x68, 0x4e, 0x33, 0x59, 0x5d, 0x7e, 0x9d, 0x48, 0x80, 0x07, 0x29, + 0x11, 0xf8, 0xdf, 0x58, 0xc1, 0x7f, 0xb1, 0x8a, 0xdb, 0x4c, 0xd0, 0x94, 0x6c, 0x10, 0x3e, 0xfb, + 0x3f, 0x02, 0x8f, 0xe6, 0x24, 0xc5, 0xff, 0xe4, 0x79, 0xbf, 0xed, 0x02, 0xeb, 0x69, 0x16, 0xe7, + 0x8c, 0x66, 0x02, 0x1e, 0x03, 0x1b, 0xc7, 0x71, 0x41, 0x38, 0x27, 0x7c, 0x6c, 0x4c, 0x86, 0x53, + 0x3b, 0x3c, 0xa8, 0x4a, 0xd7, 0x3e, 0x6b, 0x40, 0xd4, 0xed, 0xc3, 0x18, 0x80, 0x88, 0x65, 0x31, + 0x15, 0x94, 0x65, 0x7c, 0xbc, 0x33, 0x31, 0xa6, 0xa3, 0xd3, 0x4f, 0xfd, 0xfb, 0xec, 0xf5, 0x9b, + 0x44, 0x5f, 0xb6, 0xbc, 0x10, 0xde, 0x95, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xa7, 0x0b, + 0xa7, 0xc0, 0x9a, 0x33, 0x2e, 0x32, 0x9c, 
0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xbf, 0x2a, + 0x5d, 0xeb, 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0xcf, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xd5, + 0x78, 0x57, 0x95, 0xf3, 0x41, 0xbf, 0x1c, 0x39, 0x20, 0x7f, 0x71, 0xe2, 0xbf, 0x9c, 0xfd, 0x44, + 0x22, 0x19, 0x44, 0x0a, 0x92, 0x45, 0xa4, 0xee, 0xf0, 0xb2, 0x61, 0xa2, 0x4e, 0x04, 0xce, 0x80, + 0x25, 0x58, 0xce, 0x6e, 0x58, 0xb2, 0x1c, 0x9b, 0x93, 0xe1, 0x74, 0x74, 0xfa, 0x78, 0xbb, 0xfe, + 0xfc, 0x4b, 0x4d, 0x7b, 0x9a, 0x89, 0x62, 0x19, 0x3e, 0xd0, 0x3d, 0x5a, 0x0d, 0x8c, 0x5a, 0x5d, + 0xd9, 0x5f, 0xc6, 0x62, 0xf2, 0x42, 0xf6, 0xf7, 0x4e, 0xd7, 0xdf, 0x0b, 0x8d, 0xa1, 0x76, 0x17, + 0x3e, 0x07, 0xe6, 0x9c, 0x66, 0x82, 0x8f, 0xf7, 0x54, 0x6f, 0xc7, 0xdb, 0x95, 0xf2, 0x4c, 0x52, + 0x42, 0xbb, 0x2a, 0x5d, 0x53, 0x7d, 0xa2, 0x5a, 0xe4, 0xf0, 0x0b, 0x70, 0xb0, 0x56, 0x24, 0x7c, + 0x00, 0x86, 0xd7, 0x64, 0x39, 0x36, 0x64, 0x0d, 0x48, 0x7e, 0xc2, 0xf7, 0x80, 0xb9, 0xc0, 0x37, + 0xb7, 0x44, 0xcd, 0xd6, 0x46, 0xf5, 0xe2, 0xf3, 0x9d, 0x27, 0x86, 0xf7, 0x8b, 0x01, 0xe0, 0xe6, + 0x2c, 0xa1, 0x0b, 0xcc, 0x82, 0xe0, 0xb8, 0x16, 0xb1, 0xea, 0xa4, 0x48, 0x02, 0xa8, 0xc6, 0xe1, + 0x23, 0xb0, 0xc7, 0x49, 0xb1, 0xa0, 0x59, 0xa2, 0x34, 0xad, 0x70, 0x54, 0x95, 0xee, 0xde, 0x45, + 0x0d, 0xa1, 0x66, 0x0f, 0x9e, 0x80, 0x91, 0x20, 0x45, 0x4a, 0x33, 0x2c, 0x64, 0xe8, 0x50, 0x85, + 0xbe, 0x5b, 0x95, 0xee, 0xe8, 0xb2, 0x83, 0x51, 0x3f, 0xc6, 0x8b, 0xc1, 0xc1, 0x5a, 0xc7, 0xf0, + 0x02, 0x58, 0x57, 0xac, 0x78, 0xc5, 0x32, 0x7d, 0x92, 0x47, 0xa7, 0x8f, 0xee, 0x37, 0xec, 0xeb, + 0x3a, 0xba, 0x1b, 0x96, 0x06, 0x38, 0x6a, 0x85, 0xbc, 0x3f, 0x0c, 0xb0, 0xdf, 0xa4, 0x39, 0x67, + 0x85, 0x80, 0x47, 0x60, 0x57, 0x9d, 0x4c, 0xe5, 0x5a, 0x68, 0x55, 0xa5, 0xbb, 0xab, 0xa6, 0xa6, + 0x50, 0xf8, 0x0d, 0xb0, 0xd4, 0x25, 0x8b, 0xd8, 0x4d, 0xed, 0x61, 0x78, 0x2c, 0x85, 0xcf, 0x35, + 0xf6, 0xb6, 0x74, 0xdf, 0xdf, 0x7c, 0x40, 0xfc, 0x66, 0x1b, 0xb5, 0x64, 0x99, 0x26, 0x67, 0x85, + 0x50, 0x4e, 0x98, 0x75, 0x1a, 0x99, 0x1e, 0x29, 0x54, 0xda, 0x85, 0xf3, 0xbc, 0xa1, 0xa9, 0xa3, + 0x6f, 0xd7, 0x76, 0x9d, 0x75, 0x30, 0xea, 0xc7, 0x78, 0xab, 0x9d, 0xce, 0xaf, 0x8b, 0x1b, 0x1a, + 0x11, 0xf8, 0x23, 0xb0, 0xe4, 0x5b, 0x14, 0x63, 0x81, 0x55, 0x37, 0xeb, 0x77, 0xb9, 0x7d, 0x52, + 0xfc, 0xfc, 0x3a, 0x91, 0x00, 0xf7, 0x65, 0x74, 0x77, 0x9d, 0xbe, 0x23, 0x02, 0x77, 0x77, 0xb9, + 0xc3, 0x50, 0xab, 0x0a, 0xbf, 0x02, 0x23, 0xfd, 0x78, 0x5c, 0x2e, 0x73, 0xa2, 0xcb, 0xf4, 0x34, + 0x65, 0x74, 0xd6, 0x6d, 0xbd, 0x5d, 0x5f, 0xa2, 0x3e, 0x0d, 0x7e, 0x0f, 0x6c, 0xa2, 0x0b, 0x97, + 0x8f, 0x8e, 0x1c, 0xec, 0x87, 0xdb, 0xdd, 0x84, 0xf0, 0xa1, 0xce, 0x65, 0x37, 0x08, 0x47, 0x9d, + 0x16, 0x7c, 0x09, 0x4c, 0xe9, 0x26, 0x1f, 0x0f, 0x95, 0xe8, 0x47, 0xdb, 0x89, 0xca, 0x31, 0x84, + 0x07, 0x5a, 0xd8, 0x94, 0x2b, 0x8e, 0x6a, 0x1d, 0xef, 0x77, 0x03, 0x3c, 0x5c, 0xf3, 0xf8, 0x39, + 0xe5, 0x02, 0xfe, 0xb0, 0xe1, 0xb3, 0xbf, 0x9d, 0xcf, 0x92, 0xad, 0x5c, 0x6e, 0x0f, 0x68, 0x83, + 0xf4, 0x3c, 0x3e, 0x07, 0x26, 0x15, 0x24, 0x6d, 0x9c, 0xd9, 0xf2, 0x8d, 0x50, 0xd5, 0x75, 0x5d, + 0x7c, 0x2b, 0x15, 0x50, 0x2d, 0xe4, 0x1d, 0x83, 0x3d, 0x7d, 0x11, 0xe0, 0x64, 0xed, 0xb0, 0xef, + 0xeb, 0xf0, 0xde, 0x81, 0x0f, 0xc3, 0xbb, 0x95, 0x33, 0x78, 0xbd, 0x72, 0x06, 0x6f, 0x56, 0xce, + 0xe0, 0xe7, 0xca, 0x31, 0xee, 0x2a, 0xc7, 0x78, 0x5d, 0x39, 0xc6, 0x9b, 0xca, 0x31, 0xfe, 0xac, + 0x1c, 0xe3, 0xd7, 0xbf, 0x9c, 0xc1, 0xab, 0xa3, 0xfb, 0x7e, 0xd8, 0x7f, 0x07, 0x00, 0x00, 0xff, + 0xff, 0x1c, 0xe6, 0x20, 0x06, 0xcf, 0x07, 0x00, 0x00, } func (m *Endpoint) Marshal() (dAtA []byte, err error) { diff --git 
a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index 6925f7ce3..55828dd97 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/discovery/v1beta1"; // Endpoint represents a single logical "backend" implementing a service. message Endpoint { @@ -35,7 +35,8 @@ message Endpoint { // according to the corresponding EndpointSlice addressType field. Consumers // must handle different types of addresses in the context of their own // capabilities. This must contain at least one address but no more than - // 100. + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set repeated string addresses = 1; @@ -53,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. @@ -72,8 +73,7 @@ message Endpoint { map topology = 5; // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional optional string nodeName = 6; @@ -97,15 +97,13 @@ message EndpointConditions { // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional optional bool serving = 2; // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional optional bool terminating = 3; } @@ -120,9 +118,8 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. + // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -130,20 +127,20 @@ message EndpointPort { // Default is empty string. optional string name = 1; - // The IP protocol for this port. 
+ // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. optional string protocol = 2; - // The port number of the endpoint. + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. optional int32 port = 3; - // The application protocol for this port. + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). + // RFC-6335 and https://www.iana.org/assignments/service-names). // Non-standard protocols should use prefixed names such as // mycompany.com/my-custom-protocol. // +optional @@ -156,7 +153,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -186,9 +183,9 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // List of endpoint slices + // items is the list of endpoint slices repeated EndpointSlice items = 2; } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index eeb46e175..defd8e2ce 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -33,9 +33,11 @@ import ( // labels, which must be joined to produce the full set of endpoints. type EndpointSlice struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is // immutable after creation. The following address types are currently @@ -44,10 +46,12 @@ type EndpointSlice struct { // * IPv6: Represents an IPv6 Address. // * FQDN: Represents a Fully Qualified Domain Name. AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"` + // endpoints is a list of unique endpoints in this slice. Each slice may // include a maximum of 1000 endpoints. // +listType=atomic Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"` + // ports specifies the list of network ports exposed by each endpoint in // this slice. Each port must have a unique name. When ports is empty, it // indicates that there are no defined ports. When a port is defined with a @@ -64,8 +68,10 @@ type AddressType string const ( // AddressTypeIPv4 represents an IPv4 Address. AddressTypeIPv4 = AddressType(v1.IPv4Protocol) + // AddressTypeIPv6 represents an IPv6 Address. AddressTypeIPv6 = AddressType(v1.IPv6Protocol) + // AddressTypeFQDN represents a FQDN. AddressTypeFQDN = AddressType("FQDN") ) @@ -76,11 +82,14 @@ type Endpoint struct { // according to the corresponding EndpointSlice addressType field. 
Consumers // must handle different types of addresses in the context of their own // capabilities. This must contain at least one address but no more than - // 100. + // 100. These are all assumed to be fungible and clients may choose to only + // use the first element. Refer to: https://issue.k8s.io/106267 // +listType=set Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"` + // conditions contains information about the current status of the endpoint. Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"` + // hostname of this endpoint. This field may be used by consumers of // endpoints to distinguish endpoints from each other (e.g. in DNS names). // Multiple endpoints which use the same hostname should be considered @@ -88,10 +97,12 @@ type Endpoint struct { // Label (RFC 1123) validation. // +optional Hostname *string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,4,opt,name=targetRef"` + // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels @@ -107,11 +118,12 @@ type Endpoint struct { // This field is deprecated and will be removed in future api versions. // +optional Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"` + // nodeName represents the name of the Node hosting this endpoint. This can - // be used to determine endpoints local to a Node. This field can be enabled - // with the EndpointSliceNodeName feature gate. + // be used to determine endpoints local to a Node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"` + // hints contains information associated with how an endpoint should be // consumed. // +featureGate=TopologyAwareHints @@ -132,15 +144,13 @@ type EndpointConditions struct { // serving is identical to ready except that it is set regardless of the // terminating state of endpoints. This condition should be set to true for // a ready endpoint that is terminating. If nil, consumers should defer to - // the ready condition. This field can be enabled with the - // EndpointSliceTerminatingCondition feature gate. + // the ready condition. // +optional Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"` // terminating indicates that this endpoint is terminating. A nil value // indicates an unknown state. Consumers should interpret this unknown state - // to mean that the endpoint is not terminating. This field can be enabled - // with the EndpointSliceTerminatingCondition feature gate. + // to mean that the endpoint is not terminating. // +optional Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"` } @@ -161,27 +171,29 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { - // The name of this port. All ports in an EndpointSlice must have a unique - // name. If the EndpointSlice is dervied from a Kubernetes service, this - // corresponds to the Service.ports[].name. + // name represents the name of this port. All ports in an EndpointSlice must have a unique name. 
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. // * must start and end with an alphanumeric character. // Default is empty string. Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - // The IP protocol for this port. + + // protocol represents the IP protocol for this port. // Must be UDP, TCP, or SCTP. // Default is TCP. Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"` - // The port number of the endpoint. + + // port represents the port number of the endpoint. // If this is not specified, ports are not restricted and must be // interpreted in the context of the specific consumer. Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"` - // The application protocol for this port. + + // appProtocol represents the application protocol for this port. // This field follows standard Kubernetes label syntax. // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). + // RFC-6335 and https://www.iana.org/assignments/service-names). // Non-standard protocols should use prefixed names such as // mycompany.com/my-custom-protocol. // +optional @@ -197,9 +209,11 @@ type EndpointPort struct { // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of endpoint slices + + // items is the list of endpoint slices Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index b4c221999..847d4d58e 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -24,17 +24,17 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Endpoint = map[string]string{ "": "Endpoint represents a single logical \"backend\" implementing a service.", - "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100.", + "addresses": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. 
Refer to: https://issue.k8s.io/106267", "conditions": "conditions contains information about the current status of the endpoint.", "hostname": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", "targetRef": "targetRef is a reference to a Kubernetes object that represents this endpoint.", "topology": "topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node\n where the endpoint is located. This should match the corresponding\n node label.\n* topology.kubernetes.io/zone: the value indicates the zone where the\n endpoint is located. This should match the corresponding node label.\n* topology.kubernetes.io/region: the value indicates the region where the\n endpoint is located. This should match the corresponding node label.\nThis field is deprecated and will be removed in future api versions.", - "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate.", + "nodeName": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node.", "hints": "hints contains information associated with how an endpoint should be consumed.", } @@ -45,8 +45,8 @@ func (Endpoint) SwaggerDoc() map[string]string { var map_EndpointConditions = map[string]string{ "": "EndpointConditions represents the current condition of an endpoint.", "ready": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints.", - "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", - "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate.", + "serving": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", + "terminating": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. 
Consumers should interpret this unknown state to mean that the endpoint is not terminating.", } func (EndpointConditions) SwaggerDoc() map[string]string { @@ -64,10 +64,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "port": "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -89,7 +89,7 @@ func (EndpointSlice) SwaggerDoc() map[string]string { var map_EndpointSliceList = map[string]string{ "": "EndpointSliceList represents a list of endpoint slices", "metadata": "Standard list metadata.", - "items": "List of endpoint slices", + "items": "items is the list of endpoint slices", } func (EndpointSliceList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/events/v1/doc.go b/vendor/k8s.io/api/events/v1/doc.go index 6e320e063..5fe700ffc 100644 --- a/vendor/k8s.io/api/events/v1/doc.go +++ b/vendor/k8s.io/api/events/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=events.k8s.io package v1 // import "k8s.io/api/events/v1" diff --git a/vendor/k8s.io/api/events/v1/generated.pb.go b/vendor/k8s.io/api/events/v1/generated.pb.go index 70ad588a6..96a6047e8 100644 --- a/vendor/k8s.io/api/events/v1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1/generated.proto +// source: k8s.io/api/events/v1/generated.proto package v1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_ee2600587b650fac, []int{0} + return fileDescriptor_d3a3e1495c224e47, []int{0} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_ee2600587b650fac, []int{1} + return fileDescriptor_d3a3e1495c224e47, []int{1} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_ee2600587b650fac, []int{2} + return fileDescriptor_d3a3e1495c224e47, []int{2} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,60 +135,59 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1/generated.proto", fileDescriptor_ee2600587b650fac) + proto.RegisterFile("k8s.io/api/events/v1/generated.proto", fileDescriptor_d3a3e1495c224e47) } -var fileDescriptor_ee2600587b650fac = []byte{ - // 772 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4f, 0x6f, 0xe3, 0x44, - 0x14, 0x8f, 0xbb, 0x4d, 0xda, 0x4c, 0x76, 0xb7, 0xe9, 0x2c, 0x52, 0x87, 0xae, 0xe4, 0x84, 0xac, - 0x84, 0x22, 0x24, 0x6c, 0xb2, 0x42, 0x88, 0x0b, 0x12, 0xeb, 0xa6, 0xa0, 0xa2, 0x96, 0x4a, 0xd3, - 0x9e, 0x10, 0x87, 0x4e, 0x9c, 0x57, 0xd7, 0x24, 0x9e, 0xb1, 0x66, 0x26, 0x91, 0x7a, 0xe3, 0x82, - 0xc4, 0x91, 0x2f, 0xc0, 0x07, 0x40, 0x7c, 0x91, 0x1e, 0x7b, 0xec, 0x29, 0xa2, 0xe6, 0x8b, 0x20, - 0x8f, 0x9d, 0x38, 0xcd, 0x1f, 0x08, 0xda, 0x9b, 0xe7, 0xbd, 0xdf, 0x9f, 0xf7, 0x66, 0x9e, 0x1f, - 0xfa, 0x6a, 0xf0, 0xa5, 0x72, 0x42, 0xe1, 0x0e, 0x46, 0x3d, 0x90, 0x1c, 0x34, 0x28, 0x77, 0x0c, - 0xbc, 0x2f, 0xa4, 0x9b, 0x27, 0x58, 0x1c, 0xba, 0x30, 0x06, 0xae, 0x95, 0x3b, 0xee, 0xb8, 0x01, - 0x70, 0x90, 0x4c, 0x43, 0xdf, 0x89, 0xa5, 0xd0, 0x02, 0x7f, 0x90, 0xa1, 0x1c, 0x16, 0x87, 0x4e, - 0x86, 0x72, 0xc6, 0x9d, 0xc3, 0x4f, 0x83, 0x50, 0xdf, 0x8c, 0x7a, 0x8e, 0x2f, 0x22, 0x37, 0x10, - 0x81, 0x70, 0x0d, 0xb8, 0x37, 0xba, 0x36, 0x27, 0x73, 0x30, 0x5f, 0x99, 0xc8, 0x61, 0x6b, 0xce, - 0xca, 0x17, 0x12, 0x56, 0x18, 0x1d, 0x7e, 0x5e, 0x60, 0x22, 0xe6, 0xdf, 0x84, 0x1c, 0xe4, 0xad, - 0x1b, 0x0f, 0x82, 0x34, 0xa0, 0xdc, 0x08, 0x34, 0x5b, 0xc5, 0x72, 0xd7, 0xb1, 0xe4, 0x88, 0xeb, - 0x30, 0x82, 0x25, 0xc2, 0x17, 0xff, 0x45, 0x50, 0xfe, 0x0d, 0x44, 0x6c, 0x91, 
0xd7, 0xfa, 0xbd, - 0x8a, 0xca, 0xc7, 0x69, 0xff, 0xf8, 0x0a, 0xed, 0xa6, 0xd5, 0xf4, 0x99, 0x66, 0xc4, 0x6a, 0x5a, - 0xed, 0xda, 0xdb, 0xcf, 0x9c, 0xe2, 0x92, 0x66, 0xa2, 0x4e, 0x3c, 0x08, 0xd2, 0x80, 0x72, 0x52, - 0xb4, 0x33, 0xee, 0x38, 0xe7, 0xbd, 0x9f, 0xc0, 0xd7, 0x67, 0xa0, 0x99, 0x87, 0xef, 0x26, 0x8d, - 0x52, 0x32, 0x69, 0xa0, 0x22, 0x46, 0x67, 0xaa, 0xf8, 0x0a, 0x55, 0xcd, 0x55, 0x5f, 0x86, 0x11, - 0x90, 0x2d, 0x63, 0xe1, 0x6e, 0x66, 0x71, 0x16, 0xfa, 0x52, 0xa4, 0x34, 0x6f, 0x3f, 0x77, 0xa8, - 0x1e, 0x4f, 0x95, 0x68, 0x21, 0x8a, 0x8f, 0x51, 0x45, 0x81, 0x0c, 0x41, 0x91, 0x67, 0x46, 0xfe, - 0x23, 0x67, 0xd5, 0x33, 0x3b, 0x86, 0x7b, 0x61, 0x80, 0x1e, 0x4a, 0x26, 0x8d, 0x4a, 0xf6, 0x4d, - 0x73, 0x32, 0x3e, 0x43, 0xaf, 0x24, 0xc4, 0x42, 0xea, 0x90, 0x07, 0x47, 0x82, 0x6b, 0x29, 0x86, - 0x43, 0x90, 0x64, 0xbb, 0x69, 0xb5, 0xab, 0xde, 0xeb, 0xbc, 0x82, 0x57, 0x74, 0x19, 0x42, 0x57, - 0xf1, 0xf0, 0xb7, 0x68, 0x7f, 0x16, 0x3e, 0xe1, 0x4a, 0x33, 0xee, 0x03, 0x29, 0x1b, 0xb1, 0x0f, - 0x73, 0xb1, 0x7d, 0xba, 0x08, 0xa0, 0xcb, 0x1c, 0xfc, 0x31, 0xaa, 0x30, 0x5f, 0x87, 0x82, 0x93, - 0x8a, 0x61, 0xbf, 0xcc, 0xd9, 0x95, 0x77, 0x26, 0x4a, 0xf3, 0x6c, 0x8a, 0x93, 0xc0, 0x94, 0xe0, - 0x64, 0xe7, 0x29, 0x8e, 0x9a, 0x28, 0xcd, 0xb3, 0xf8, 0x12, 0x55, 0x25, 0x04, 0x4c, 0xf6, 0x43, - 0x1e, 0x90, 0x5d, 0x73, 0x63, 0x6f, 0xe6, 0x6f, 0x2c, 0x9d, 0xe9, 0xe2, 0x85, 0x29, 0x5c, 0x83, - 0x04, 0xee, 0xcf, 0x3d, 0x02, 0x9d, 0xb2, 0x69, 0x21, 0x84, 0xbf, 0x43, 0x3b, 0x12, 0x86, 0xe9, - 0x8c, 0x91, 0xea, 0xe6, 0x9a, 0xb5, 0x64, 0xd2, 0xd8, 0xa1, 0x19, 0x8f, 0x4e, 0x05, 0x70, 0x13, - 0x6d, 0x73, 0xa1, 0x81, 0x20, 0xd3, 0xc7, 0xf3, 0xdc, 0x77, 0xfb, 0x7b, 0xa1, 0x81, 0x9a, 0x4c, - 0x8a, 0xd0, 0xb7, 0x31, 0x90, 0xda, 0x53, 0xc4, 0xe5, 0x6d, 0x0c, 0xd4, 0x64, 0x30, 0xa0, 0x7a, - 0x1f, 0x62, 0x09, 0x7e, 0xaa, 0x78, 0x21, 0x46, 0xd2, 0x07, 0xf2, 0xdc, 0x14, 0xd6, 0x58, 0x55, - 0x58, 0x36, 0x1c, 0x06, 0xe6, 0x91, 0x5c, 0xae, 0xde, 0x5d, 0x10, 0xa0, 0x4b, 0x92, 0xf8, 0x57, - 0x0b, 0x91, 0x22, 0xf8, 0x4d, 0x28, 0x95, 0x99, 0x49, 0xa5, 0x59, 0x14, 0x93, 0x17, 0xc6, 0xef, - 0x93, 0xcd, 0xa6, 0xdd, 0x0c, 0x7a, 0x33, 0xb7, 0x26, 0xdd, 0x35, 0x9a, 0x74, 0xad, 0x1b, 0xfe, - 0xc5, 0x42, 0x07, 0x45, 0xf2, 0x94, 0xcd, 0x57, 0xf2, 0xf2, 0x7f, 0x57, 0xd2, 0xc8, 0x2b, 0x39, - 0xe8, 0xae, 0x96, 0xa4, 0xeb, 0xbc, 0xf0, 0x3b, 0xb4, 0x57, 0xa4, 0x8e, 0xc4, 0x88, 0x6b, 0xb2, - 0xd7, 0xb4, 0xda, 0x65, 0xef, 0x20, 0x97, 0xdc, 0xeb, 0x3e, 0x4d, 0xd3, 0x45, 0x7c, 0xeb, 0x4f, - 0x0b, 0x65, 0xbf, 0xfa, 0x69, 0xa8, 0x34, 0xfe, 0x71, 0x69, 0x47, 0x39, 0x9b, 0x35, 0x92, 0xb2, - 0xcd, 0x86, 0xaa, 0xe7, 0xce, 0xbb, 0xd3, 0xc8, 0xdc, 0x7e, 0xfa, 0x1a, 0x95, 0x43, 0x0d, 0x91, - 0x22, 0x5b, 0xcd, 0x67, 0xed, 0xda, 0xdb, 0xd7, 0xff, 0xb2, 0x3c, 0xbc, 0x17, 0xb9, 0x4e, 0xf9, - 0x24, 0x65, 0xd0, 0x8c, 0xd8, 0xfa, 0xc3, 0x42, 0xb5, 0xb9, 0xe5, 0x82, 0xdf, 0xa0, 0xb2, 0x6f, - 0xda, 0xb6, 0x4c, 0xdb, 0x33, 0x52, 0xd6, 0x6c, 0x96, 0xc3, 0x23, 0x54, 0x1f, 0x32, 0xa5, 0xcf, - 0x7b, 0x0a, 0xe4, 0x18, 0xfa, 0xef, 0xb3, 0x1d, 0x67, 0xf3, 0x7a, 0xba, 0x20, 0x48, 0x97, 0x2c, - 0xbc, 0xf6, 0xdd, 0xa3, 0x5d, 0xba, 0x7f, 0xb4, 0x4b, 0x0f, 0x8f, 0x76, 0xe9, 0xe7, 0xc4, 0xb6, - 0xee, 0x12, 0xdb, 0xba, 0x4f, 0x6c, 0xeb, 0x21, 0xb1, 0xad, 0xbf, 0x12, 0xdb, 0xfa, 0xed, 0x6f, - 0xbb, 0xf4, 0xc3, 0xd6, 0xb8, 0xf3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6b, 0xcb, 0x1e, 0x6e, - 0x6b, 0x07, 0x00, 0x00, +var fileDescriptor_d3a3e1495c224e47 = []byte{ + // 759 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 
0x4f, 0xdb, 0x48, + 0x14, 0x8f, 0x81, 0x04, 0x32, 0xe1, 0x4f, 0x18, 0x90, 0x98, 0x05, 0xc9, 0xc9, 0x86, 0xd5, 0x2a, + 0x5a, 0x69, 0xed, 0x05, 0xad, 0x56, 0xab, 0x3d, 0x2d, 0x26, 0xec, 0x8a, 0x0a, 0x8a, 0x34, 0x70, + 0xaa, 0x7a, 0x60, 0xe2, 0x3c, 0x8c, 0x4b, 0xec, 0xb1, 0xc6, 0x93, 0x48, 0xdc, 0x7a, 0xa9, 0xd4, + 0x63, 0xbf, 0x40, 0x3f, 0x40, 0xd5, 0x2f, 0xc2, 0x91, 0x23, 0xa7, 0xa8, 0xb8, 0x5f, 0xa4, 0xf2, + 0xd8, 0x89, 0x43, 0xfe, 0xb4, 0xa9, 0x7a, 0xf3, 0xbc, 0xf7, 0xfb, 0xf3, 0xde, 0xcc, 0xcb, 0x0b, + 0xfa, 0xe5, 0xe6, 0xef, 0xd0, 0x70, 0xb9, 0xc9, 0x02, 0xd7, 0x84, 0x2e, 0xf8, 0x32, 0x34, 0xbb, + 0x7b, 0xa6, 0x03, 0x3e, 0x08, 0x26, 0xa1, 0x65, 0x04, 0x82, 0x4b, 0x8e, 0x37, 0x13, 0x94, 0xc1, + 0x02, 0xd7, 0x48, 0x50, 0x46, 0x77, 0x6f, 0xfb, 0x77, 0xc7, 0x95, 0xd7, 0x9d, 0xa6, 0x61, 0x73, + 0xcf, 0x74, 0xb8, 0xc3, 0x4d, 0x05, 0x6e, 0x76, 0xae, 0xd4, 0x49, 0x1d, 0xd4, 0x57, 0x22, 0xb2, + 0x5d, 0x1b, 0xb2, 0xb2, 0xb9, 0x80, 0x09, 0x46, 0xdb, 0x7f, 0x66, 0x18, 0x8f, 0xd9, 0xd7, 0xae, + 0x0f, 0xe2, 0xd6, 0x0c, 0x6e, 0x9c, 0x38, 0x10, 0x9a, 0x1e, 0x48, 0x36, 0x89, 0x65, 0x4e, 0x63, + 0x89, 0x8e, 0x2f, 0x5d, 0x0f, 0xc6, 0x08, 0x7f, 0x7d, 0x8b, 0x10, 0xda, 0xd7, 0xe0, 0xb1, 0x51, + 0x5e, 0xed, 0x7d, 0x11, 0xe5, 0x8f, 0xe2, 0xfe, 0xf1, 0x25, 0x5a, 0x8a, 0xab, 0x69, 0x31, 0xc9, + 0x88, 0x56, 0xd5, 0xea, 0xa5, 0xfd, 0x3f, 0x8c, 0xec, 0x92, 0x06, 0xa2, 0x46, 0x70, 0xe3, 0xc4, + 0x81, 0xd0, 0x88, 0xd1, 0x46, 0x77, 0xcf, 0x38, 0x6b, 0xbe, 0x02, 0x5b, 0x9e, 0x82, 0x64, 0x16, + 0xbe, 0xeb, 0x55, 0x72, 0x51, 0xaf, 0x82, 0xb2, 0x18, 0x1d, 0xa8, 0xe2, 0x4b, 0x54, 0x54, 0x57, + 0x7d, 0xe1, 0x7a, 0x40, 0xe6, 0x94, 0x85, 0x39, 0x9b, 0xc5, 0xa9, 0x6b, 0x0b, 0x1e, 0xd3, 0xac, + 0xf5, 0xd4, 0xa1, 0x78, 0xd4, 0x57, 0xa2, 0x99, 0x28, 0x3e, 0x42, 0x85, 0x10, 0x84, 0x0b, 0x21, + 0x99, 0x57, 0xf2, 0x3f, 0x1b, 0x93, 0x9e, 0xd9, 0x50, 0xdc, 0x73, 0x05, 0xb4, 0x50, 0xd4, 0xab, + 0x14, 0x92, 0x6f, 0x9a, 0x92, 0xf1, 0x29, 0xda, 0x10, 0x10, 0x70, 0x21, 0x5d, 0xdf, 0x39, 0xe4, + 0xbe, 0x14, 0xbc, 0xdd, 0x06, 0x41, 0x16, 0xaa, 0x5a, 0xbd, 0x68, 0xed, 0xa4, 0x15, 0x6c, 0xd0, + 0x71, 0x08, 0x9d, 0xc4, 0xc3, 0xff, 0xa3, 0xf5, 0x41, 0xf8, 0xd8, 0x0f, 0x25, 0xf3, 0x6d, 0x20, + 0x79, 0x25, 0xf6, 0x53, 0x2a, 0xb6, 0x4e, 0x47, 0x01, 0x74, 0x9c, 0x83, 0x7f, 0x45, 0x05, 0x66, + 0x4b, 0x97, 0xfb, 0xa4, 0xa0, 0xd8, 0xab, 0x29, 0xbb, 0x70, 0xa0, 0xa2, 0x34, 0xcd, 0xc6, 0x38, + 0x01, 0x2c, 0xe4, 0x3e, 0x59, 0x7c, 0x8a, 0xa3, 0x2a, 0x4a, 0xd3, 0x2c, 0xbe, 0x40, 0x45, 0x01, + 0x0e, 0x13, 0x2d, 0xd7, 0x77, 0xc8, 0x92, 0xba, 0xb1, 0xdd, 0xe1, 0x1b, 0x8b, 0x67, 0x3a, 0x7b, + 0x61, 0x0a, 0x57, 0x20, 0xc0, 0xb7, 0x87, 0x1e, 0x81, 0xf6, 0xd9, 0x34, 0x13, 0xc2, 0xcf, 0xd0, + 0xa2, 0x80, 0x76, 0x3c, 0x63, 0xa4, 0x38, 0xbb, 0x66, 0x29, 0xea, 0x55, 0x16, 0x69, 0xc2, 0xa3, + 0x7d, 0x01, 0x5c, 0x45, 0x0b, 0x3e, 0x97, 0x40, 0x90, 0xea, 0x63, 0x39, 0xf5, 0x5d, 0x78, 0xce, + 0x25, 0x50, 0x95, 0x89, 0x11, 0xf2, 0x36, 0x00, 0x52, 0x7a, 0x8a, 0xb8, 0xb8, 0x0d, 0x80, 0xaa, + 0x0c, 0x06, 0x54, 0x6e, 0x41, 0x20, 0xc0, 0x8e, 0x15, 0xcf, 0x79, 0x47, 0xd8, 0x40, 0x96, 0x55, + 0x61, 0x95, 0x49, 0x85, 0x25, 0xc3, 0xa1, 0x60, 0x16, 0x49, 0xe5, 0xca, 0x8d, 0x11, 0x01, 0x3a, + 0x26, 0x89, 0xdf, 0x6a, 0x88, 0x64, 0xc1, 0xff, 0x5c, 0x11, 0xaa, 0x99, 0x0c, 0x25, 0xf3, 0x02, + 0xb2, 0xa2, 0xfc, 0x7e, 0x9b, 0x6d, 0xda, 0xd5, 0xa0, 0x57, 0x53, 0x6b, 0xd2, 0x98, 0xa2, 0x49, + 0xa7, 0xba, 0xe1, 0x37, 0x1a, 0xda, 0xca, 0x92, 0x27, 0x6c, 0xb8, 0x92, 0xd5, 0xef, 0xae, 0xa4, + 0x92, 0x56, 0xb2, 0xd5, 0x98, 0x2c, 0x49, 0xa7, 0x79, 0xe1, 0x03, 0xb4, 0x96, 0xa5, 0x0e, 0x79, + 0xc7, 
0x97, 0x64, 0xad, 0xaa, 0xd5, 0xf3, 0xd6, 0x56, 0x2a, 0xb9, 0xd6, 0x78, 0x9a, 0xa6, 0xa3, + 0xf8, 0xda, 0x47, 0x0d, 0x25, 0x3f, 0xf5, 0x13, 0x37, 0x94, 0xf8, 0xe5, 0xd8, 0x8e, 0x32, 0x66, + 0x6b, 0x24, 0x66, 0xab, 0x0d, 0x55, 0x4e, 0x9d, 0x97, 0xfa, 0x91, 0xa1, 0xfd, 0xf4, 0x2f, 0xca, + 0xbb, 0x12, 0xbc, 0x90, 0xcc, 0x55, 0xe7, 0xeb, 0xa5, 0xfd, 0x9d, 0xaf, 0x2c, 0x0f, 0x6b, 0x25, + 0xd5, 0xc9, 0x1f, 0xc7, 0x0c, 0x9a, 0x10, 0x6b, 0x1f, 0x34, 0x54, 0x1a, 0x5a, 0x2e, 0x78, 0x17, + 0xe5, 0x6d, 0xd5, 0xb6, 0xa6, 0xda, 0x1e, 0x90, 0x92, 0x66, 0x93, 0x1c, 0xee, 0xa0, 0x72, 0x9b, + 0x85, 0xf2, 0xac, 0x19, 0x82, 0xe8, 0x42, 0xeb, 0x47, 0xb6, 0xe3, 0x60, 0x5e, 0x4f, 0x46, 0x04, + 0xe9, 0x98, 0x85, 0xf5, 0xcf, 0xdd, 0xa3, 0x9e, 0xbb, 0x7f, 0xd4, 0x73, 0x0f, 0x8f, 0x7a, 0xee, + 0x75, 0xa4, 0x6b, 0x77, 0x91, 0xae, 0xdd, 0x47, 0xba, 0xf6, 0x10, 0xe9, 0xda, 0xa7, 0x48, 0xd7, + 0xde, 0x7d, 0xd6, 0x73, 0x2f, 0x36, 0x27, 0xfd, 0x9b, 0x7e, 0x09, 0x00, 0x00, 0xff, 0xff, 0x6f, + 0x4f, 0x7a, 0xe4, 0x64, 0x07, 0x00, 0x00, } func (m *Event) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/events/v1/generated.proto b/vendor/k8s.io/api/events/v1/generated.proto index 04df31b0c..6c7e4cca1 100644 --- a/vendor/k8s.io/api/events/v1/generated.proto +++ b/vendor/k8s.io/api/events/v1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/events/v1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. // Events have a limited retention time and triggers and messages may evolve @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -68,12 +68,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -88,15 +88,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. 
// +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -108,7 +108,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -123,6 +123,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/vendor/k8s.io/api/events/v1/types.go b/vendor/k8s.io/api/events/v1/types.go index e01a2b21e..86b12eee1 100644 --- a/vendor/k8s.io/api/events/v1/types.go +++ b/vendor/k8s.io/api/events/v1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. // Events have a limited retention time and triggers and messages may evolve @@ -109,6 +110,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // EventList is a list of Event objects. type EventList struct { diff --git a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go index 797da63bb..44ac0c3bb 100644 --- a/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..5217d1ac6 --- /dev/null +++ b/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index d92411bc8..5d7881e8c 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// source: k8s.io/api/events/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{0} + return fileDescriptor_99027a32dee7673b, []int{0} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{1} + return fileDescriptor_99027a32dee7673b, []int{1} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{2} + return fileDescriptor_99027a32dee7673b, []int{2} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,60 +135,59 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) + proto.RegisterFile("k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_99027a32dee7673b) } -var fileDescriptor_4f97f691c32a5ac8 = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0x25, 0x28, 0x40, - 0x20, 0x14, 
0x08, 0x59, 0x07, 0x45, 0xd1, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0x36, 0x3e, - 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, - 0x7a, 0xec, 0x33, 0xf4, 0xd6, 0x5b, 0x1f, 0xc3, 0xc7, 0x1c, 0x7d, 0x12, 0x6a, 0xf6, 0x45, 0x0a, - 0x2e, 0x57, 0xa2, 0xac, 0x1f, 0x58, 0x45, 0x6f, 0xe2, 0xcc, 0xf7, 0x33, 0x33, 0x3b, 0x1a, 0x14, - 0x8c, 0xbe, 0x55, 0x5e, 0x2c, 0xfc, 0xd1, 0x64, 0x00, 0x92, 0x83, 0x06, 0xe5, 0x4f, 0x81, 0x0f, - 0x85, 0xf4, 0x6d, 0x82, 0xa5, 0xb1, 0x0f, 0x53, 0xe0, 0x5a, 0xf9, 0xd3, 0x93, 0x01, 0x68, 0x76, - 0xe2, 0x47, 0xc0, 0x41, 0x32, 0x0d, 0x43, 0x2f, 0x95, 0x42, 0x0b, 0xfc, 0xac, 0x80, 0x7a, 0x2c, - 0x8d, 0xbd, 0x02, 0xea, 0x59, 0xe8, 0xf1, 0xcb, 0x28, 0xd6, 0x97, 0x93, 0x81, 0x17, 0x8a, 0xc4, - 0x8f, 0x44, 0x24, 0x7c, 0xc3, 0x18, 0x4c, 0x3e, 0x99, 0x2f, 0xf3, 0x61, 0x7e, 0x15, 0x4a, 0xc7, - 0xdd, 0x25, 0xd3, 0x50, 0x48, 0xf0, 0xa7, 0x6b, 0x6e, 0xc7, 0x5f, 0x97, 0x98, 0x84, 0x85, 0x97, - 0x31, 0x07, 0x79, 0xe5, 0xa7, 0xa3, 0x28, 0x0f, 0x28, 0x3f, 0x01, 0xcd, 0x36, 0xb1, 0xfc, 0x6d, - 0x2c, 0x39, 0xe1, 0x3a, 0x4e, 0x60, 0x8d, 0xf0, 0xcd, 0x7d, 0x04, 0x15, 0x5e, 0x42, 0xc2, 0x56, - 0x79, 0xdd, 0x3f, 0x1a, 0xa8, 0x7a, 0x9a, 0x0f, 0x01, 0x7f, 0x44, 0xfb, 0x79, 0x35, 0x43, 0xa6, - 0x19, 0x71, 0x3a, 0x4e, 0xaf, 0xf9, 0xea, 0x2b, 0xaf, 0x9c, 0xd4, 0x42, 0xd4, 0x4b, 0x47, 0x51, - 0x1e, 0x50, 0x5e, 0x8e, 0xf6, 0xa6, 0x27, 0xde, 0xfb, 0xc1, 0xcf, 0x10, 0xea, 0x73, 0xd0, 0x2c, - 0xc0, 0xd7, 0xb3, 0x76, 0x25, 0x9b, 0xb5, 0x51, 0x19, 0xa3, 0x0b, 0x55, 0xfc, 0x11, 0x35, 0xcc, - 0xbc, 0x2f, 0xe2, 0x04, 0xc8, 0x03, 0x63, 0xe1, 0xef, 0x66, 0x71, 0x1e, 0x87, 0x52, 0xe4, 0xb4, - 0xe0, 0xd0, 0x3a, 0x34, 0x4e, 0xe7, 0x4a, 0xb4, 0x14, 0xc5, 0xef, 0x50, 0x4d, 0x81, 0x8c, 0x41, - 0x91, 0x87, 0x46, 0xfe, 0x85, 0xb7, 0xf5, 0xad, 0x3d, 0x23, 0xf0, 0xc1, 0xa0, 0x03, 0x94, 0xcd, - 0xda, 0xb5, 0xe2, 0x37, 0xb5, 0x0a, 0xf8, 0x1c, 0x3d, 0x95, 0x90, 0x0a, 0xa9, 0x63, 0x1e, 0xbd, - 0x11, 0x5c, 0x4b, 0x31, 0x1e, 0x83, 0x24, 0x7b, 0x1d, 0xa7, 0xd7, 0x08, 0xbe, 0xb0, 0x65, 0x3c, - 0xa5, 0xeb, 0x10, 0xba, 0x89, 0x87, 0xbf, 0x47, 0x87, 0x8b, 0xf0, 0x5b, 0xae, 0x34, 0xe3, 0x21, - 0x90, 0xaa, 0x11, 0x7b, 0x66, 0xc5, 0x0e, 0xe9, 0x2a, 0x80, 0xae, 0x73, 0xf0, 0x0b, 0x54, 0x63, - 0xa1, 0x8e, 0x05, 0x27, 0x35, 0xc3, 0x7e, 0x62, 0xd9, 0xb5, 0xd7, 0x26, 0x4a, 0x6d, 0x36, 0xc7, - 0x49, 0x60, 0x4a, 0x70, 0x52, 0xbf, 0x8b, 0xa3, 0x26, 0x4a, 0x6d, 0x16, 0x5f, 0xa0, 0x86, 0x84, - 0x88, 0xc9, 0x61, 0xcc, 0x23, 0xb2, 0x6f, 0xc6, 0xf6, 0x7c, 0x79, 0x6c, 0xf9, 0x62, 0x97, 0xcf, - 0x4c, 0xe1, 0x13, 0x48, 0xe0, 0xe1, 0xd2, 0x4b, 0xd0, 0x39, 0x9b, 0x96, 0x42, 0xf8, 0x1d, 0xaa, - 0x4b, 0x18, 0xe7, 0x8b, 0x46, 0x1a, 0xbb, 0x6b, 0x36, 0xb3, 0x59, 0xbb, 0x4e, 0x0b, 0x1e, 0x9d, - 0x0b, 0xe0, 0x0e, 0xda, 0xe3, 0x42, 0x03, 0x41, 0xa6, 0x8f, 0x47, 0xd6, 0x77, 0xef, 0x07, 0xa1, - 0x81, 0x9a, 0x4c, 0x8e, 0xd0, 0x57, 0x29, 0x90, 0xe6, 0x5d, 0xc4, 0xc5, 0x55, 0x0a, 0xd4, 0x64, - 0x30, 0xa0, 0xd6, 0x10, 0x52, 0x09, 0x61, 0xae, 0xf8, 0x41, 0x4c, 0x64, 0x08, 0xe4, 0x91, 0x29, - 0xac, 0xbd, 0xa9, 0xb0, 0x62, 0x39, 0x0c, 0x2c, 0x20, 0x56, 0xae, 0xd5, 0x5f, 0x11, 0xa0, 0x6b, - 0x92, 0xf8, 0x37, 0x07, 0x91, 0x32, 0xf8, 0x5d, 0x2c, 0x95, 0x59, 0x4c, 0xa5, 0x59, 0x92, 0x92, - 0xc7, 0xc6, 0xef, 0xcb, 0xdd, 0x56, 0xde, 0x6c, 0x7b, 0xc7, 0x5a, 0x93, 0xfe, 0x16, 0x4d, 0xba, - 0xd5, 0x0d, 0xff, 0xea, 0xa0, 0xa3, 0x32, 0x79, 0xc6, 0x96, 0x2b, 0x79, 0xf2, 0x9f, 0x2b, 0x69, - 0xdb, 0x4a, 0x8e, 0xfa, 0x9b, 0x25, 0xe9, 0x36, 0x2f, 0xfc, 0x1a, 0x1d, 0x94, 0xa9, 0x37, 0x62, - 0xc2, 0x35, 0x39, 0xe8, 0x38, 0xbd, 
0x6a, 0x70, 0x64, 0x25, 0x0f, 0xfa, 0x77, 0xd3, 0x74, 0x15, - 0xdf, 0xfd, 0xcb, 0x41, 0xc5, 0xff, 0xfd, 0x2c, 0x56, 0x1a, 0xff, 0xb4, 0x76, 0xa8, 0xbc, 0xdd, - 0x1a, 0xc9, 0xd9, 0xe6, 0x4c, 0xb5, 0xac, 0xf3, 0xfe, 0x3c, 0xb2, 0x74, 0xa4, 0x4e, 0x51, 0x35, - 0xd6, 0x90, 0x28, 0xf2, 0xa0, 0xf3, 0xb0, 0xd7, 0x7c, 0xd5, 0xb9, 0xef, 0x82, 0x04, 0x8f, 0xad, - 0x58, 0xf5, 0x6d, 0x4e, 0xa3, 0x05, 0xbb, 0xfb, 0xa7, 0x83, 0x9a, 0x4b, 0x17, 0x06, 0x3f, 0x47, - 0xd5, 0xd0, 0xf4, 0xee, 0x98, 0xde, 0x17, 0xa4, 0xa2, 0xe3, 0x22, 0x87, 0x27, 0xa8, 0x35, 0x66, - 0x4a, 0xbf, 0x1f, 0x28, 0x90, 0x53, 0x18, 0xfe, 0x9f, 0x3b, 0xb9, 0x58, 0xda, 0xb3, 0x15, 0x41, - 0xba, 0x66, 0x11, 0xbc, 0xbc, 0xbe, 0x75, 0x2b, 0x9f, 0x6f, 0xdd, 0xca, 0xcd, 0xad, 0x5b, 0xf9, - 0x25, 0x73, 0x9d, 0xeb, 0xcc, 0x75, 0x3e, 0x67, 0xae, 0x73, 0x93, 0xb9, 0xce, 0xdf, 0x99, 0xeb, - 0xfc, 0xfe, 0x8f, 0x5b, 0xf9, 0xb1, 0x6e, 0xdb, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xc9, - 0x09, 0x14, 0x84, 0x07, 0x00, 0x00, +var fileDescriptor_99027a32dee7673b = []byte{ + // 764 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcf, 0x4f, 0xdb, 0x48, + 0x14, 0xc7, 0x63, 0x20, 0x81, 0x4c, 0xf8, 0x11, 0x86, 0x03, 0x03, 0x2b, 0x39, 0x51, 0x90, 0x50, + 0x76, 0xa5, 0xb5, 0x17, 0xb4, 0x5a, 0xed, 0x6d, 0x85, 0x09, 0x5b, 0x81, 0xa0, 0x48, 0x03, 0xa7, + 0xaa, 0x07, 0x26, 0xce, 0xc3, 0xb8, 0xc4, 0x1e, 0x6b, 0x3c, 0x89, 0xc4, 0xad, 0x97, 0x4a, 0x3d, + 0xf6, 0x6f, 0xe8, 0xad, 0xb7, 0xfe, 0x19, 0x1c, 0x39, 0x72, 0x8a, 0x8a, 0xfb, 0x8f, 0x54, 0x1e, + 0x3b, 0x71, 0xc8, 0x0f, 0x91, 0xaa, 0x37, 0xfb, 0xbd, 0xef, 0xf7, 0xf3, 0xde, 0x8c, 0x5f, 0x5e, + 0xd0, 0xef, 0xb7, 0xff, 0x86, 0x86, 0xcb, 0x4d, 0x16, 0xb8, 0x26, 0x74, 0xc1, 0x97, 0xa1, 0xd9, + 0xdd, 0x6b, 0x82, 0x64, 0x7b, 0xa6, 0x03, 0x3e, 0x08, 0x26, 0xa1, 0x65, 0x04, 0x82, 0x4b, 0x8e, + 0xb7, 0x12, 0xa9, 0xc1, 0x02, 0xd7, 0x48, 0xa4, 0x46, 0x2a, 0xdd, 0xfe, 0xd3, 0x71, 0xe5, 0x4d, + 0xa7, 0x69, 0xd8, 0xdc, 0x33, 0x1d, 0xee, 0x70, 0x53, 0x39, 0x9a, 0x9d, 0x6b, 0xf5, 0xa6, 0x5e, + 0xd4, 0x53, 0x42, 0xda, 0xae, 0x0d, 0x15, 0xb5, 0xb9, 0x00, 0xb3, 0x3b, 0x56, 0x6d, 0xfb, 0xef, + 0x4c, 0xe3, 0x31, 0xfb, 0xc6, 0xf5, 0x41, 0xdc, 0x99, 0xc1, 0xad, 0x13, 0x07, 0x42, 0xd3, 0x03, + 0xc9, 0x26, 0xb9, 0xcc, 0x69, 0x2e, 0xd1, 0xf1, 0xa5, 0xeb, 0xc1, 0x98, 0xe1, 0x9f, 0x97, 0x0c, + 0xa1, 0x7d, 0x03, 0x1e, 0x1b, 0xf5, 0xd5, 0x3e, 0x17, 0x51, 0xfe, 0x28, 0xbe, 0x04, 0x7c, 0x85, + 0x96, 0xe2, 0x6e, 0x5a, 0x4c, 0x32, 0xa2, 0x55, 0xb5, 0x7a, 0x69, 0xff, 0x2f, 0x23, 0xbb, 0xa9, + 0x01, 0xd4, 0x08, 0x6e, 0x9d, 0x38, 0x10, 0x1a, 0xb1, 0xda, 0xe8, 0xee, 0x19, 0xe7, 0xcd, 0x77, + 0x60, 0xcb, 0x33, 0x90, 0xcc, 0xc2, 0xf7, 0xbd, 0x4a, 0x2e, 0xea, 0x55, 0x50, 0x16, 0xa3, 0x03, + 0x2a, 0xbe, 0x42, 0x45, 0x75, 0xdf, 0x97, 0xae, 0x07, 0x64, 0x4e, 0x95, 0x30, 0x67, 0x2b, 0x71, + 0xe6, 0xda, 0x82, 0xc7, 0x36, 0x6b, 0x3d, 0xad, 0x50, 0x3c, 0xea, 0x93, 0x68, 0x06, 0xc5, 0x27, + 0xa8, 0x10, 0x82, 0x70, 0x21, 0x24, 0xf3, 0x0a, 0xbf, 0x6b, 0x4c, 0xfd, 0xd6, 0x86, 0x02, 0x5c, + 0x28, 0xb5, 0x85, 0xa2, 0x5e, 0xa5, 0x90, 0x3c, 0xd3, 0x94, 0x80, 0xcf, 0xd0, 0x86, 0x80, 0x80, + 0x0b, 0xe9, 0xfa, 0xce, 0x21, 0xf7, 0xa5, 0xe0, 0xed, 0x36, 0x08, 0xb2, 0x50, 0xd5, 0xea, 0x45, + 0xeb, 0xb7, 0xb4, 0x8d, 0x0d, 0x3a, 0x2e, 0xa1, 0x93, 0x7c, 0xf8, 0x15, 0x5a, 0x1f, 0x84, 0x8f, + 0xfd, 0x50, 0x32, 0xdf, 0x06, 0x92, 0x57, 0xb0, 0xad, 0x14, 0xb6, 0x4e, 0x47, 0x05, 0x74, 0xdc, + 0x83, 0x77, 0x51, 0x81, 0xd9, 0xd2, 0xe5, 0x3e, 0x29, 0x28, 0xf7, 0x6a, 0xea, 0x2e, 0x1c, 0xa8, + 0x28, 0x4d, 0xb3, 0xb1, 
0x4e, 0x00, 0x0b, 0xb9, 0x4f, 0x16, 0x9f, 0xeb, 0xa8, 0x8a, 0xd2, 0x34, + 0x8b, 0x2f, 0x51, 0x51, 0x80, 0xc3, 0x44, 0xcb, 0xf5, 0x1d, 0xb2, 0xa4, 0xae, 0x6d, 0x67, 0xf8, + 0xda, 0xe2, 0xc1, 0xce, 0x3e, 0x33, 0x85, 0x6b, 0x10, 0xe0, 0xdb, 0x43, 0x5f, 0x82, 0xf6, 0xdd, + 0x34, 0x03, 0xe1, 0x13, 0xb4, 0x28, 0xa0, 0x1d, 0x0f, 0x1a, 0x29, 0xce, 0xce, 0x2c, 0x45, 0xbd, + 0xca, 0x22, 0x4d, 0x7c, 0xb4, 0x0f, 0xc0, 0x55, 0xb4, 0xe0, 0x73, 0x09, 0x04, 0xa9, 0x73, 0x2c, + 0xa7, 0x75, 0x17, 0x5e, 0x73, 0x09, 0x54, 0x65, 0x62, 0x85, 0xbc, 0x0b, 0x80, 0x94, 0x9e, 0x2b, + 0x2e, 0xef, 0x02, 0xa0, 0x2a, 0x83, 0x01, 0x95, 0x5b, 0x10, 0x08, 0xb0, 0x63, 0xe2, 0x05, 0xef, + 0x08, 0x1b, 0xc8, 0xb2, 0x6a, 0xac, 0x32, 0xa9, 0xb1, 0x64, 0x38, 0x94, 0xcc, 0x22, 0x29, 0xae, + 0xdc, 0x18, 0x01, 0xd0, 0x31, 0x24, 0xfe, 0xa8, 0x21, 0x92, 0x05, 0xff, 0x77, 0x45, 0xa8, 0x06, + 0x33, 0x94, 0xcc, 0x0b, 0xc8, 0x8a, 0xaa, 0xf7, 0xc7, 0x6c, 0x23, 0xaf, 0xa6, 0xbd, 0x9a, 0x96, + 0x26, 0x8d, 0x29, 0x4c, 0x3a, 0xb5, 0x1a, 0xfe, 0xa0, 0xa1, 0xcd, 0x2c, 0x79, 0xca, 0x86, 0x3b, + 0x59, 0xfd, 0xe9, 0x4e, 0x2a, 0x69, 0x27, 0x9b, 0x8d, 0xc9, 0x48, 0x3a, 0xad, 0x16, 0x3e, 0x40, + 0x6b, 0x59, 0xea, 0x90, 0x77, 0x7c, 0x49, 0xd6, 0xaa, 0x5a, 0x3d, 0x6f, 0x6d, 0xa6, 0xc8, 0xb5, + 0xc6, 0xf3, 0x34, 0x1d, 0xd5, 0xd7, 0xbe, 0x6a, 0x28, 0xf9, 0xbd, 0x9f, 0xba, 0xa1, 0xc4, 0x6f, + 0xc7, 0x16, 0x95, 0x31, 0xdb, 0x41, 0x62, 0xb7, 0x5a, 0x53, 0xe5, 0xb4, 0xf2, 0x52, 0x3f, 0x32, + 0xb4, 0xa4, 0x8e, 0x50, 0xde, 0x95, 0xe0, 0x85, 0x64, 0xae, 0x3a, 0x5f, 0x2f, 0xed, 0x57, 0x5f, + 0xda, 0x20, 0xd6, 0x4a, 0x0a, 0xcb, 0x1f, 0xc7, 0x36, 0x9a, 0xb8, 0x6b, 0x5f, 0x34, 0x54, 0x1a, + 0xda, 0x30, 0x78, 0x07, 0xe5, 0x6d, 0x75, 0x76, 0x4d, 0x9d, 0x7d, 0x60, 0x4a, 0x4e, 0x9c, 0xe4, + 0x70, 0x07, 0x95, 0xdb, 0x2c, 0x94, 0xe7, 0xcd, 0x10, 0x44, 0x17, 0x5a, 0xbf, 0xb2, 0x27, 0x07, + 0x43, 0x7b, 0x3a, 0x02, 0xa4, 0x63, 0x25, 0xac, 0xff, 0xee, 0x9f, 0xf4, 0xdc, 0xc3, 0x93, 0x9e, + 0x7b, 0x7c, 0xd2, 0x73, 0xef, 0x23, 0x5d, 0xbb, 0x8f, 0x74, 0xed, 0x21, 0xd2, 0xb5, 0xc7, 0x48, + 0xd7, 0xbe, 0x45, 0xba, 0xf6, 0xe9, 0xbb, 0x9e, 0x7b, 0xb3, 0x35, 0xf5, 0x1f, 0xf6, 0x47, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x2b, 0xc1, 0x64, 0x36, 0x7d, 0x07, 0x00, 0x00, } func (m *Event) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index 57e95b964..fbdb30970 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/events/v1beta1"; // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. // Events have a limited retention time and triggers and messages may evolve @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -72,12 +72,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -92,15 +92,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -112,7 +112,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -125,6 +125,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go index 0e6bd5a83..e6c28a4f8 100644 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_Event = map[string]string{ diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 20b3b2a00..818486f39 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto +// source: k8s.io/api/extensions/v1beta1/generated.proto package v1beta1 @@ -49,94 +49,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} + return fileDescriptor_90a532284de28347, []int{0} } 
func (m *DaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +80,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} + return fileDescriptor_90a532284de28347, []int{1} } func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +108,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} + return fileDescriptor_90a532284de28347, []int{2} } func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +136,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} + return fileDescriptor_90a532284de28347, []int{3} } func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +164,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} + return fileDescriptor_90a532284de28347, []int{4} } func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +192,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} + return fileDescriptor_90a532284de28347, []int{5} } func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +220,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} + return fileDescriptor_90a532284de28347, []int{6} } func (m *Deployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +248,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} + return fileDescriptor_90a532284de28347, []int{7} } func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +276,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} + return fileDescriptor_90a532284de28347, []int{8} } func (m *DeploymentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +304,7 @@ var xxx_messageInfo_DeploymentList 
proto.InternalMessageInfo func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} + return fileDescriptor_90a532284de28347, []int{9} } func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +332,7 @@ var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{13} + return fileDescriptor_90a532284de28347, []int{10} } func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +360,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} + return fileDescriptor_90a532284de28347, []int{11} } func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +388,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} + return fileDescriptor_90a532284de28347, []int{12} } func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,38 +413,10 @@ func (m *DeploymentStrategy) XXX_DiscardUnknown() { var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} + return fileDescriptor_90a532284de28347, []int{13} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +444,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} + return fileDescriptor_90a532284de28347, []int{14} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-581,66 +469,10 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} + return fileDescriptor_90a532284de28347, []int{15} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +500,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} + return fileDescriptor_90a532284de28347, []int{16} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +528,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} + return fileDescriptor_90a532284de28347, []int{17} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +556,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{24} + return fileDescriptor_90a532284de28347, []int{18} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -749,10 +581,94 @@ func (m *IngressList) XXX_DiscardUnknown() { var xxx_messageInfo_IngressList proto.InternalMessageInfo +func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } +func (*IngressLoadBalancerIngress) ProtoMessage() {} +func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) 
{ + return fileDescriptor_90a532284de28347, []int{19} +} +func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src) +} +func (m *IngressLoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo + +func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } +func (*IngressLoadBalancerStatus) ProtoMessage() {} +func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_90a532284de28347, []int{20} +} +func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src) +} +func (m *IngressLoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo + +func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } +func (*IngressPortStatus) ProtoMessage() {} +func (*IngressPortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_90a532284de28347, []int{21} +} +func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressPortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressPortStatus.Merge(m, src) +} +func (m *IngressPortStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressPortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressPortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo + func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} + return fileDescriptor_90a532284de28347, []int{22} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +696,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} + return fileDescriptor_90a532284de28347, []int{23} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +724,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() 
{} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{27} + return fileDescriptor_90a532284de28347, []int{24} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +752,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} + return fileDescriptor_90a532284de28347, []int{25} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +780,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{29} + return fileDescriptor_90a532284de28347, []int{26} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +808,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} + return fileDescriptor_90a532284de28347, []int{27} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +836,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} + return fileDescriptor_90a532284de28347, []int{28} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +864,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} + return fileDescriptor_90a532284de28347, []int{29} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +892,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} + return fileDescriptor_90a532284de28347, []int{30} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +920,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} + return fileDescriptor_90a532284de28347, []int{31} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +948,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return 
fileDescriptor_cdc93917efc28165, []int{35} + return fileDescriptor_90a532284de28347, []int{32} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +976,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} + return fileDescriptor_90a532284de28347, []int{33} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1085,94 +1001,10 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) 
ProtoMessage() {} func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} + return fileDescriptor_90a532284de28347, []int{34} } func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1032,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} + return fileDescriptor_90a532284de28347, []int{35} } func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1060,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} + return fileDescriptor_90a532284de28347, []int{36} } func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} + return fileDescriptor_90a532284de28347, []int{37} } func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} + return fileDescriptor_90a532284de28347, []int{38} } func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{45} + return fileDescriptor_90a532284de28347, []int{39} } func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1172,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} + return fileDescriptor_90a532284de28347, []int{40} } func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1200,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} + return fileDescriptor_90a532284de28347, []int{41} } func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1393,122 +1225,10 @@ func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo -func (m *RunAsGroupStrategyOptions) Reset() { *m = 
RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} -} -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) -} -func (m *RunAsGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} -} -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) -} -func (m *RunAsUserStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} 
-func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} + return fileDescriptor_90a532284de28347, []int{42} } func (m *Scale) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1256,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} + return fileDescriptor_90a532284de28347, []int{43} } func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1284,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} + return fileDescriptor_90a532284de28347, []int{44} } func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1589,38 +1309,7 @@ func (m *ScaleStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") @@ -1635,15 +1324,15 @@ func init() { proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") 
proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.extensions.v1beta1.IngressLoadBalancerIngress") + proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressLoadBalancerStatus") + proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressPortStatus") proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule") proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") @@ -1656,9 +1345,6 @@ func init() { proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") @@ -1667,351 +1353,196 @@ func init() { proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } func init() { - 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165) -} - -var fileDescriptor_cdc93917efc28165 = []byte{ - // 3761 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x1c, 0xc7, - 0x72, 0xd6, 0xec, 0x2e, 0xb9, 0xcb, 0xe2, 0x7f, 0x93, 0x22, 0xf7, 0x49, 0x4f, 0x5c, 0xbd, 0x31, - 0xa2, 0xc8, 0x8e, 0xb4, 0x6b, 0xc9, 0x92, 0x9e, 0x22, 0x21, 0xef, 0x99, 0x4b, 0x8a, 0x12, 0x5f, - 0xf8, 0xb3, 0xee, 0x25, 0x65, 0xc3, 0x88, 0x1d, 0x0f, 0x77, 0x9b, 0xcb, 0x11, 0x67, 0x67, 0xc6, - 0xd3, 0xb3, 0x34, 0x17, 0xc8, 0x21, 0x87, 0x5c, 0x0c, 0x04, 0x48, 0x2e, 0x4e, 0x72, 0x8c, 0x11, - 0x20, 0xb7, 0x20, 0xc7, 0xe4, 0x60, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0x06, 0x72, 0x88, - 0x4f, 0x44, 0x4c, 0x9f, 0x82, 0x9c, 0x72, 0x0b, 0x74, 0x0a, 0xba, 0xa7, 0xe7, 0x7f, 0x86, 0x3b, - 0xa4, 0x25, 0x22, 0x0e, 0x72, 0x12, 0xb7, 0xab, 0xea, 0xab, 0xea, 0xee, 0xea, 0xaa, 0xea, 0x9e, - 0x12, 0xac, 0xec, 0xdf, 0xa7, 0x55, 0xd5, 0xa8, 0xed, 0xf7, 0x76, 0x88, 0xa5, 0x13, 0x9b, 0xd0, - 0xda, 0x01, 0xd1, 0xdb, 0x86, 0x55, 0x13, 0x04, 0xc5, 0x54, 0x6b, 0xe4, 0xd0, 0x26, 0x3a, 0x55, - 0x0d, 0x9d, 0xd6, 0x0e, 0x6e, 0xed, 0x10, 0x5b, 0xb9, 0x55, 0xeb, 0x10, 0x9d, 0x58, 0x8a, 0x4d, - 0xda, 0x55, 0xd3, 0x32, 0x6c, 0x03, 0x5d, 0x71, 0xd8, 0xab, 0x8a, 0xa9, 0x56, 0x7d, 0xf6, 0xaa, - 0x60, 0xbf, 0x74, 0xb3, 0xa3, 0xda, 0x7b, 0xbd, 0x9d, 0x6a, 0xcb, 0xe8, 0xd6, 0x3a, 0x46, 0xc7, - 0xa8, 0x71, 0xa9, 0x9d, 0xde, 0x2e, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x1c, 0xb4, 0x4b, 0x72, 0x40, - 0x79, 0xcb, 0xb0, 0x48, 0xed, 0x20, 0xa6, 0xf1, 0xd2, 0x1d, 0x9f, 0xa7, 0xab, 0xb4, 0xf6, 0x54, - 0x9d, 0x58, 0xfd, 0x9a, 0xb9, 0xdf, 0x61, 0x03, 0xb4, 0xd6, 0x25, 0xb6, 0x92, 0x24, 0x55, 0x4b, - 0x93, 0xb2, 0x7a, 0xba, 0xad, 0x76, 0x49, 0x4c, 0xe0, 0xde, 0x20, 0x01, 0xda, 0xda, 0x23, 0x5d, - 0x25, 0x26, 0xf7, 0x56, 0x9a, 0x5c, 0xcf, 0x56, 0xb5, 0x9a, 0xaa, 0xdb, 0xd4, 0xb6, 0xa2, 0x42, - 0xf2, 0x1d, 0x98, 0x5a, 0xd4, 0x34, 0xe3, 0x13, 0xd2, 0x5e, 0x6a, 0xae, 0x2e, 0x5b, 0xea, 0x01, - 0xb1, 0xd0, 0x55, 0x28, 0xe8, 0x4a, 0x97, 0x94, 0xa5, 0xab, 0xd2, 0xf5, 0x91, 0xfa, 0xd8, 0xf3, - 0xa3, 0xca, 0x85, 0xe3, 0xa3, 0x4a, 0x61, 0x43, 0xe9, 0x12, 0xcc, 0x29, 0xf2, 0x43, 0x98, 0x16, - 0x52, 0x2b, 0x1a, 0x39, 0x7c, 0x6a, 0x68, 0xbd, 0x2e, 0x41, 0xd7, 0x60, 0xb8, 0xcd, 0x01, 0x84, - 0xe0, 0x84, 0x10, 0x1c, 0x76, 0x60, 0xb1, 0xa0, 0xca, 0x14, 0x26, 0x85, 0xf0, 0x13, 0x83, 0xda, - 0x0d, 0xc5, 0xde, 0x43, 0xb7, 0x01, 0x4c, 0xc5, 0xde, 0x6b, 0x58, 0x64, 0x57, 0x3d, 0x14, 0xe2, - 0x48, 0x88, 0x43, 0xc3, 0xa3, 0xe0, 0x00, 0x17, 0xba, 0x01, 0x25, 0x8b, 0x28, 0xed, 0x4d, 0x5d, - 0xeb, 0x97, 0x73, 0x57, 0xa5, 0xeb, 0xa5, 0xfa, 0x94, 0x90, 0x28, 0x61, 0x31, 0x8e, 0x3d, 0x0e, - 0xf9, 0xb3, 0x1c, 0x8c, 0x2c, 0x2b, 0xa4, 0x6b, 0xe8, 0x4d, 0x62, 0xa3, 0x8f, 0xa0, 0xc4, 0xb6, - 0xab, 0xad, 0xd8, 0x0a, 0xd7, 0x36, 0x7a, 0xfb, 0xcd, 0xaa, 0xef, 0x4e, 0xde, 0xea, 0x55, 0xcd, - 0xfd, 0x0e, 0x1b, 0xa0, 0x55, 0xc6, 0x5d, 0x3d, 0xb8, 0x55, 0xdd, 0xdc, 0x79, 0x46, 0x5a, 0xf6, - 0x3a, 0xb1, 0x15, 0xdf, 0x3e, 0x7f, 0x0c, 0x7b, 0xa8, 0x68, 0x03, 0x0a, 0xd4, 0x24, 0x2d, 0x6e, - 0xd9, 0xe8, 0xed, 0x1b, 0xd5, 0x13, 0x9d, 0xb5, 0xea, 0x59, 0xd6, 0x34, 0x49, 0xcb, 0x5f, 0x71, - 0xf6, 0x0b, 0x73, 0x1c, 0xf4, 0x14, 0x86, 0xa9, 0xad, 0xd8, 0x3d, 0x5a, 0xce, 0x73, 0xc4, 0x6a, - 0x66, 0x44, 0x2e, 0xe5, 0x6f, 0x86, 0xf3, 0x1b, 0x0b, 0x34, 0xf9, 0x3f, 0x72, 0x80, 0x3c, 0xde, - 0x25, 0x43, 0x6f, 0xab, 0xb6, 0x6a, 0xe8, 0xe8, 0x01, 0x14, 0xec, 0xbe, 0xe9, 0xba, 0xc0, 
0x35, - 0xd7, 0xa0, 0xad, 0xbe, 0x49, 0x5e, 0x1c, 0x55, 0xe6, 0xe2, 0x12, 0x8c, 0x82, 0xb9, 0x0c, 0x5a, - 0xf3, 0x4c, 0xcd, 0x71, 0xe9, 0x3b, 0x61, 0xd5, 0x2f, 0x8e, 0x2a, 0x09, 0x87, 0xad, 0xea, 0x21, - 0x85, 0x0d, 0x44, 0x07, 0x80, 0x34, 0x85, 0xda, 0x5b, 0x96, 0xa2, 0x53, 0x47, 0x93, 0xda, 0x25, - 0x62, 0x11, 0xde, 0xc8, 0xb6, 0x69, 0x4c, 0xa2, 0x7e, 0x49, 0x58, 0x81, 0xd6, 0x62, 0x68, 0x38, - 0x41, 0x03, 0xf3, 0x66, 0x8b, 0x28, 0xd4, 0xd0, 0xcb, 0x85, 0xb0, 0x37, 0x63, 0x3e, 0x8a, 0x05, - 0x15, 0xbd, 0x0e, 0xc5, 0x2e, 0xa1, 0x54, 0xe9, 0x90, 0xf2, 0x10, 0x67, 0x9c, 0x14, 0x8c, 0xc5, - 0x75, 0x67, 0x18, 0xbb, 0x74, 0xf9, 0x0b, 0x09, 0xc6, 0xbd, 0x95, 0x5b, 0x53, 0xa9, 0x8d, 0x7e, - 0x27, 0xe6, 0x87, 0xd5, 0x6c, 0x53, 0x62, 0xd2, 0xdc, 0x0b, 0x3d, 0x9f, 0x77, 0x47, 0x02, 0x3e, - 0xb8, 0x0e, 0x43, 0xaa, 0x4d, 0xba, 0x6c, 0x1f, 0xf2, 0xd7, 0x47, 0x6f, 0x5f, 0xcf, 0xea, 0x32, - 0xf5, 0x71, 0x01, 0x3a, 0xb4, 0xca, 0xc4, 0xb1, 0x83, 0x22, 0xff, 0x49, 0x21, 0x60, 0x3e, 0x73, - 0x4d, 0xf4, 0x01, 0x94, 0x28, 0xd1, 0x48, 0xcb, 0x36, 0x2c, 0x61, 0xfe, 0x5b, 0x19, 0xcd, 0x57, - 0x76, 0x88, 0xd6, 0x14, 0xa2, 0xf5, 0x31, 0x66, 0xbf, 0xfb, 0x0b, 0x7b, 0x90, 0xe8, 0x1d, 0x28, - 0xd9, 0xa4, 0x6b, 0x6a, 0x8a, 0x4d, 0xc4, 0x39, 0x7a, 0x2d, 0x38, 0x05, 0xe6, 0x39, 0x0c, 0xac, - 0x61, 0xb4, 0xb7, 0x04, 0x1b, 0x3f, 0x3e, 0xde, 0x92, 0xb8, 0xa3, 0xd8, 0x83, 0x41, 0x07, 0x30, - 0xd1, 0x33, 0xdb, 0x8c, 0xd3, 0x66, 0x51, 0xb0, 0xd3, 0x17, 0x9e, 0x74, 0x2f, 0xeb, 0xda, 0x6c, - 0x87, 0xa4, 0xeb, 0x73, 0x42, 0xd7, 0x44, 0x78, 0x1c, 0x47, 0xb4, 0xa0, 0x45, 0x98, 0xec, 0xaa, - 0x3a, 0x8b, 0x4b, 0xfd, 0x26, 0x69, 0x19, 0x7a, 0x9b, 0x72, 0xb7, 0x1a, 0xaa, 0xcf, 0x0b, 0x80, - 0xc9, 0xf5, 0x30, 0x19, 0x47, 0xf9, 0xd1, 0xaf, 0x00, 0xb9, 0xd3, 0x78, 0xec, 0x04, 0x71, 0xd5, - 0xd0, 0xb9, 0xcf, 0xe5, 0x7d, 0xe7, 0xde, 0x8a, 0x71, 0xe0, 0x04, 0x29, 0xb4, 0x06, 0xb3, 0x16, - 0x39, 0x50, 0xd9, 0x1c, 0x9f, 0xa8, 0xd4, 0x36, 0xac, 0xfe, 0x9a, 0xda, 0x55, 0xed, 0xf2, 0x30, - 0xb7, 0xa9, 0x7c, 0x7c, 0x54, 0x99, 0xc5, 0x09, 0x74, 0x9c, 0x28, 0x25, 0xff, 0xe9, 0x30, 0x4c, - 0x46, 0xe2, 0x0d, 0x7a, 0x0a, 0x73, 0xad, 0x9e, 0x65, 0x11, 0xdd, 0xde, 0xe8, 0x75, 0x77, 0x88, - 0xd5, 0x6c, 0xed, 0x91, 0x76, 0x4f, 0x23, 0x6d, 0xee, 0x28, 0x43, 0xf5, 0x05, 0x61, 0xf1, 0xdc, - 0x52, 0x22, 0x17, 0x4e, 0x91, 0x66, 0xab, 0xa0, 0xf3, 0xa1, 0x75, 0x95, 0x52, 0x0f, 0x33, 0xc7, - 0x31, 0xbd, 0x55, 0xd8, 0x88, 0x71, 0xe0, 0x04, 0x29, 0x66, 0x63, 0x9b, 0x50, 0xd5, 0x22, 0xed, - 0xa8, 0x8d, 0xf9, 0xb0, 0x8d, 0xcb, 0x89, 0x5c, 0x38, 0x45, 0x1a, 0xdd, 0x85, 0x51, 0x47, 0x1b, - 0xdf, 0x3f, 0xb1, 0xd1, 0x33, 0x02, 0x6c, 0x74, 0xc3, 0x27, 0xe1, 0x20, 0x1f, 0x9b, 0x9a, 0xb1, - 0x43, 0x89, 0x75, 0x40, 0xda, 0xe9, 0x1b, 0xbc, 0x19, 0xe3, 0xc0, 0x09, 0x52, 0x6c, 0x6a, 0x8e, - 0x07, 0xc6, 0xa6, 0x36, 0x1c, 0x9e, 0xda, 0x76, 0x22, 0x17, 0x4e, 0x91, 0x66, 0x7e, 0xec, 0x98, - 0xbc, 0x78, 0xa0, 0xa8, 0x9a, 0xb2, 0xa3, 0x91, 0x72, 0x31, 0xec, 0xc7, 0x1b, 0x61, 0x32, 0x8e, - 0xf2, 0xa3, 0xc7, 0x30, 0xed, 0x0c, 0x6d, 0xeb, 0x8a, 0x07, 0x52, 0xe2, 0x20, 0x3f, 0x11, 0x20, - 0xd3, 0x1b, 0x51, 0x06, 0x1c, 0x97, 0x41, 0x0f, 0x60, 0xa2, 0x65, 0x68, 0x1a, 0xf7, 0xc7, 0x25, - 0xa3, 0xa7, 0xdb, 0xe5, 0x11, 0x8e, 0x82, 0xd8, 0x79, 0x5c, 0x0a, 0x51, 0x70, 0x84, 0x13, 0x11, - 0x80, 0x96, 0x9b, 0x70, 0x68, 0x19, 0x78, 0x7c, 0xbc, 0x95, 0x35, 0x06, 0x78, 0xa9, 0xca, 0xaf, - 0x01, 0xbc, 0x21, 0x8a, 0x03, 0xc0, 0xf2, 0x3f, 0x49, 0x30, 0x9f, 0x12, 0x3a, 0xd0, 0x2f, 0x43, - 0x29, 0xf6, 0x37, 0x22, 0x29, 0xf6, 0x72, 0x8a, 0x58, 0x20, 0xcf, 0xea, 0x30, 0x6e, 0xb1, 0x59, - 0xe9, 0x1d, 0x87, 
0x45, 0xc4, 0xc8, 0xbb, 0x03, 0xa6, 0x81, 0x83, 0x32, 0x7e, 0xcc, 0x9f, 0x3e, - 0x3e, 0xaa, 0x8c, 0x87, 0x68, 0x38, 0x0c, 0x2f, 0xff, 0x59, 0x0e, 0x60, 0x99, 0x98, 0x9a, 0xd1, - 0xef, 0x12, 0xfd, 0x3c, 0x6a, 0xa8, 0xcd, 0x50, 0x0d, 0x75, 0x73, 0xd0, 0xf6, 0x78, 0xa6, 0xa5, - 0x16, 0x51, 0xef, 0x46, 0x8a, 0xa8, 0x5a, 0x76, 0xc8, 0x93, 0xab, 0xa8, 0x7f, 0xcb, 0xc3, 0x8c, - 0xcf, 0xec, 0x97, 0x51, 0x0f, 0x43, 0x7b, 0xfc, 0xeb, 0x91, 0x3d, 0x9e, 0x4f, 0x10, 0x79, 0x65, - 0x75, 0xd4, 0x33, 0x98, 0x60, 0x55, 0x8e, 0xb3, 0x97, 0xbc, 0x86, 0x1a, 0x3e, 0x75, 0x0d, 0xe5, - 0x65, 0xbb, 0xb5, 0x10, 0x12, 0x8e, 0x20, 0xa7, 0xd4, 0x6c, 0xc5, 0x1f, 0x63, 0xcd, 0xf6, 0xa5, - 0x04, 0x13, 0xfe, 0x36, 0x9d, 0x43, 0xd1, 0xb6, 0x11, 0x2e, 0xda, 0x5e, 0xcf, 0xec, 0xa2, 0x29, - 0x55, 0xdb, 0x7f, 0xb3, 0x02, 0xdf, 0x63, 0x62, 0x07, 0x7c, 0x47, 0x69, 0xed, 0x0f, 0xbe, 0xe3, - 0xa1, 0xcf, 0x24, 0x40, 0x22, 0x0b, 0x2c, 0xea, 0xba, 0x61, 0x2b, 0x4e, 0xac, 0x74, 0xcc, 0x5a, - 0xcd, 0x6c, 0x96, 0xab, 0xb1, 0xba, 0x1d, 0xc3, 0x7a, 0xa4, 0xdb, 0x56, 0xdf, 0xdf, 0xe4, 0x38, - 0x03, 0x4e, 0x30, 0x00, 0x29, 0x00, 0x96, 0xc0, 0xdc, 0x32, 0xc4, 0x41, 0xbe, 0x99, 0x21, 0xe6, - 0x31, 0x81, 0x25, 0x43, 0xdf, 0x55, 0x3b, 0x7e, 0xd8, 0xc1, 0x1e, 0x10, 0x0e, 0x80, 0x5e, 0x7a, - 0x04, 0xf3, 0x29, 0xd6, 0xa2, 0x29, 0xc8, 0xef, 0x93, 0xbe, 0xb3, 0x6c, 0x98, 0xfd, 0x89, 0x66, - 0x61, 0xe8, 0x40, 0xd1, 0x7a, 0x4e, 0xf8, 0x1d, 0xc1, 0xce, 0x8f, 0x07, 0xb9, 0xfb, 0x92, 0xfc, - 0xc5, 0x50, 0xd0, 0x77, 0x78, 0xc5, 0x7c, 0x9d, 0x5d, 0x5a, 0x4d, 0x4d, 0x6d, 0x29, 0x54, 0x14, - 0x42, 0x63, 0xce, 0x85, 0xd5, 0x19, 0xc3, 0x1e, 0x35, 0x54, 0x5b, 0xe7, 0x5e, 0x6d, 0x6d, 0x9d, - 0x7f, 0x39, 0xb5, 0xf5, 0xef, 0x42, 0x89, 0xba, 0x55, 0x75, 0x81, 0x43, 0xde, 0x3a, 0x45, 0x7c, - 0x15, 0x05, 0xb5, 0xa7, 0xc0, 0x2b, 0xa5, 0x3d, 0xd0, 0xa4, 0x22, 0x7a, 0xe8, 0x94, 0x45, 0xf4, - 0x4b, 0x2d, 0x7c, 0x59, 0xbc, 0x31, 0x95, 0x1e, 0x25, 0x6d, 0x1e, 0xdb, 0x4a, 0x7e, 0xbc, 0x69, - 0xf0, 0x51, 0x2c, 0xa8, 0xe8, 0x83, 0x90, 0xcb, 0x96, 0xce, 0xe2, 0xb2, 0x13, 0xe9, 0xee, 0x8a, - 0xb6, 0x61, 0xde, 0xb4, 0x8c, 0x8e, 0x45, 0x28, 0x5d, 0x26, 0x4a, 0x5b, 0x53, 0x75, 0xe2, 0xae, - 0x8f, 0x53, 0x11, 0x5d, 0x3e, 0x3e, 0xaa, 0xcc, 0x37, 0x92, 0x59, 0x70, 0x9a, 0xac, 0xfc, 0xbc, - 0x00, 0x53, 0xd1, 0x0c, 0x98, 0x52, 0xa4, 0x4a, 0x67, 0x2a, 0x52, 0x6f, 0x04, 0x0e, 0x83, 0x53, - 0xc1, 0x07, 0x5e, 0x70, 0x62, 0x07, 0x62, 0x11, 0x26, 0x45, 0x34, 0x70, 0x89, 0xa2, 0x4c, 0xf7, - 0x76, 0x7f, 0x3b, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x10, 0xc6, 0x2d, 0x5e, 0x77, 0xbb, 0x00, 0x4e, - 0xed, 0x7a, 0x51, 0x00, 0x8c, 0xe3, 0x20, 0x11, 0x87, 0x79, 0x59, 0xdd, 0xea, 0x97, 0xa3, 0x2e, - 0x40, 0x21, 0x5c, 0xb7, 0x2e, 0x46, 0x19, 0x70, 0x5c, 0x06, 0xad, 0xc3, 0x4c, 0x4f, 0x8f, 0x43, - 0x39, 0xae, 0x7c, 0x59, 0x40, 0xcd, 0x6c, 0xc7, 0x59, 0x70, 0x92, 0x1c, 0xda, 0x0d, 0x95, 0xb2, - 0xc3, 0x3c, 0x3c, 0xdf, 0xce, 0x7c, 0xf0, 0x32, 0xd7, 0xb2, 0x09, 0xe5, 0x76, 0x29, 0x6b, 0xb9, - 0x2d, 0xff, 0xbd, 0x14, 0x4c, 0x42, 0x5e, 0x09, 0x3c, 0xe8, 0x95, 0x29, 0x26, 0x11, 0xa8, 0x8e, - 0x8c, 0xe4, 0xea, 0xf7, 0xde, 0xa9, 0xaa, 0x5f, 0x3f, 0x79, 0x0e, 0x2e, 0x7f, 0x3f, 0x97, 0x60, - 0x6e, 0xa5, 0xf9, 0xd8, 0x32, 0x7a, 0xa6, 0x6b, 0xce, 0xa6, 0xe9, 0x2c, 0xcd, 0xcf, 0xa1, 0x60, - 0xf5, 0x34, 0x77, 0x1e, 0xaf, 0xb9, 0xf3, 0xc0, 0x3d, 0x8d, 0xcd, 0x63, 0x26, 0x22, 0xe5, 0x4c, - 0x82, 0x09, 0xa0, 0x0d, 0x18, 0xb6, 0x14, 0xbd, 0x43, 0xdc, 0xb4, 0x7a, 0x6d, 0x80, 0xf5, 0xab, - 0xcb, 0x98, 0xb1, 0x07, 0x0a, 0x1b, 0x2e, 0x8d, 0x05, 0x8a, 0xfc, 0x0f, 0x12, 0x4c, 0x3e, 0xd9, - 0xda, 0x6a, 0xac, 0xea, 0xfc, 0x44, 0xf3, 
0xb7, 0xd5, 0xab, 0x50, 0x30, 0x15, 0x7b, 0x2f, 0x9a, - 0xe9, 0x19, 0x0d, 0x73, 0x0a, 0xba, 0x03, 0x25, 0xf6, 0x2f, 0xb3, 0x8b, 0x1f, 0xa9, 0x11, 0x1e, - 0x08, 0x4b, 0x0d, 0x31, 0xf6, 0x22, 0xf0, 0x37, 0xf6, 0x38, 0xd1, 0x7b, 0x50, 0x64, 0xf1, 0x87, - 0xe8, 0xed, 0x8c, 0x05, 0xba, 0x30, 0xaa, 0xee, 0x08, 0xf9, 0x35, 0x97, 0x18, 0xc0, 0x2e, 0x9c, - 0xbc, 0x0f, 0xb3, 0x81, 0x49, 0xb0, 0x55, 0x7c, 0xca, 0x72, 0x2a, 0x6a, 0xc2, 0x10, 0xd3, 0xce, - 0x32, 0x67, 0x3e, 0xc3, 0x13, 0x68, 0x64, 0x21, 0xfc, 0xfa, 0x88, 0xfd, 0xa2, 0xd8, 0xc1, 0x92, - 0xd7, 0x61, 0x9c, 0x3f, 0x43, 0x1b, 0x96, 0xcd, 0x17, 0x13, 0x5d, 0x81, 0x7c, 0x57, 0xd5, 0x45, - 0x76, 0x1e, 0x15, 0x32, 0x79, 0x96, 0x59, 0xd8, 0x38, 0x27, 0x2b, 0x87, 0x22, 0x5e, 0xf9, 0x64, - 0xe5, 0x10, 0xb3, 0x71, 0xf9, 0x31, 0x14, 0xc5, 0x26, 0x05, 0x81, 0xf2, 0x27, 0x03, 0xe5, 0x13, - 0x80, 0x36, 0xa1, 0xb8, 0xda, 0xa8, 0x6b, 0x86, 0x53, 0xab, 0xb5, 0xd4, 0xb6, 0x15, 0xdd, 0xc1, - 0xa5, 0xd5, 0x65, 0x8c, 0x39, 0x05, 0xc9, 0x30, 0x4c, 0x0e, 0x5b, 0xc4, 0xb4, 0xb9, 0x1f, 0x8d, - 0xd4, 0x81, 0xf9, 0xc6, 0x23, 0x3e, 0x82, 0x05, 0x45, 0xfe, 0xa3, 0x1c, 0x14, 0xc5, 0x72, 0x9c, - 0xc3, 0xdd, 0x6d, 0x2d, 0x74, 0x77, 0x7b, 0x23, 0x9b, 0x6b, 0xa4, 0x5e, 0xdc, 0xb6, 0x22, 0x17, - 0xb7, 0x1b, 0x19, 0xf1, 0x4e, 0xbe, 0xb5, 0x7d, 0x9a, 0x83, 0x89, 0xb0, 0x53, 0xa2, 0xbb, 0x30, - 0xca, 0xd2, 0x94, 0xda, 0x22, 0x1b, 0x7e, 0x75, 0xec, 0x3d, 0xdd, 0x34, 0x7d, 0x12, 0x0e, 0xf2, - 0xa1, 0x8e, 0x27, 0xc6, 0xfc, 0x48, 0x4c, 0x3a, 0x7d, 0x49, 0x7b, 0xb6, 0xaa, 0x55, 0x9d, 0x0f, - 0x32, 0xd5, 0x55, 0xdd, 0xde, 0xb4, 0x9a, 0xb6, 0xa5, 0xea, 0x9d, 0x98, 0x22, 0xee, 0x94, 0x41, - 0x64, 0xf4, 0x2e, 0x4b, 0x99, 0xd4, 0xe8, 0x59, 0x2d, 0x92, 0x54, 0xfa, 0xba, 0x65, 0x1b, 0x3b, - 0xa0, 0xed, 0x35, 0xa3, 0xa5, 0x68, 0xce, 0xe6, 0x60, 0xb2, 0x4b, 0x2c, 0xa2, 0xb7, 0x88, 0x5b, - 0x6e, 0x3a, 0x10, 0xd8, 0x03, 0x93, 0xff, 0x56, 0x82, 0x51, 0xb1, 0x16, 0xe7, 0x70, 0xc9, 0xf9, - 0xed, 0xf0, 0x25, 0xe7, 0x5a, 0xc6, 0xc8, 0x91, 0x7c, 0xc3, 0xf9, 0x4b, 0xdf, 0x74, 0x16, 0x2b, - 0xd8, 0x71, 0xd9, 0x33, 0xa8, 0x1d, 0x3d, 0x2e, 0xec, 0x94, 0x63, 0x4e, 0x41, 0x3d, 0x98, 0x52, - 0x23, 0xc1, 0x45, 0xec, 0x59, 0x2d, 0x9b, 0x25, 0x9e, 0x58, 0xbd, 0x2c, 0xe0, 0xa7, 0xa2, 0x14, - 0x1c, 0x53, 0x21, 0x13, 0x88, 0x71, 0xa1, 0x77, 0xa0, 0xb0, 0x67, 0xdb, 0x66, 0xc2, 0xf3, 0xf9, - 0x80, 0x90, 0xe6, 0x9b, 0x50, 0xe2, 0xb3, 0xdb, 0xda, 0x6a, 0x60, 0x0e, 0x25, 0xff, 0x5d, 0xce, - 0x5b, 0x0f, 0x7e, 0xe7, 0x78, 0xdb, 0x9b, 0xed, 0x92, 0xa6, 0x50, 0xca, 0x1d, 0xdb, 0xb9, 0x1f, - 0xcf, 0x06, 0x0c, 0xf7, 0x68, 0x38, 0xc6, 0x8d, 0xb6, 0xfc, 0x50, 0x2f, 0x9d, 0x25, 0xd4, 0x8f, - 0x26, 0x85, 0x79, 0xf4, 0x04, 0xf2, 0xb6, 0x96, 0xf5, 0x9e, 0x2b, 0x10, 0xb7, 0xd6, 0x9a, 0x7e, - 0xac, 0xdc, 0x5a, 0x6b, 0x62, 0x06, 0x81, 0x36, 0x61, 0x88, 0xa5, 0x53, 0x16, 0x1d, 0xf2, 0xd9, - 0xa3, 0x0d, 0x5b, 0x41, 0xdf, 0xa5, 0xd8, 0x2f, 0x8a, 0x1d, 0x1c, 0xf9, 0x63, 0x18, 0x0f, 0x85, - 0x10, 0xf4, 0x11, 0x8c, 0x69, 0x86, 0xd2, 0xae, 0x2b, 0x9a, 0xa2, 0xb7, 0x88, 0xfb, 0xb5, 0xe3, - 0x5a, 0xd2, 0xd9, 0x5b, 0x0b, 0xf0, 0x89, 0x00, 0x34, 0x2b, 0x94, 0x8c, 0x05, 0x69, 0x38, 0x84, - 0x28, 0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0xa7, 0x3a, 0xa9, 0x6e, 0xa4, 0x3e, 0xc2, - 0x2c, 0x64, 0x0e, 0x4c, 0xb1, 0x33, 0x8e, 0x6e, 0x03, 0x50, 0xd2, 0xb2, 0x88, 0xcd, 0xb7, 0x33, - 0x17, 0xfe, 0x62, 0xda, 0xf4, 0x28, 0x38, 0xc0, 0x25, 0xff, 0xa3, 0x04, 0xe3, 0x1b, 0xc4, 0xfe, - 0xc4, 0xb0, 0xf6, 0x1b, 0x86, 0xa6, 0xb6, 0xfa, 0xe7, 0x90, 0x07, 0x70, 0x28, 0x0f, 0xbc, 0x39, - 0x60, 0x67, 0x42, 0xd6, 0xa5, 0x65, 0x03, 0xf9, 0x4b, 0x09, 0xe6, 
0x43, 0x9c, 0x8f, 0xfc, 0xc3, - 0xbf, 0x0d, 0x43, 0xa6, 0x61, 0xd9, 0x6e, 0x8d, 0x70, 0x2a, 0x85, 0x2c, 0xc2, 0x06, 0xaa, 0x04, - 0x06, 0x83, 0x1d, 0x34, 0xb4, 0x06, 0x39, 0xdb, 0x10, 0xae, 0x7a, 0x3a, 0x4c, 0x42, 0xac, 0x3a, - 0x08, 0xcc, 0xdc, 0x96, 0x81, 0x73, 0xb6, 0xc1, 0x36, 0xa2, 0x1c, 0xe2, 0x0a, 0x86, 0xaf, 0x57, - 0x34, 0x03, 0x0c, 0x85, 0x5d, 0xcb, 0xe8, 0x9e, 0x79, 0x0e, 0xde, 0x46, 0xac, 0x58, 0x46, 0x17, - 0x73, 0x2c, 0xf9, 0x2b, 0x09, 0xa6, 0x43, 0x9c, 0xe7, 0x90, 0x3a, 0xde, 0x09, 0xa7, 0x8e, 0x1b, - 0xa7, 0x99, 0x48, 0x4a, 0x02, 0xf9, 0x2a, 0x17, 0x99, 0x06, 0x9b, 0x30, 0xda, 0x85, 0x51, 0xd3, - 0x68, 0x37, 0x5f, 0xc2, 0xf7, 0xcd, 0x49, 0x96, 0xd2, 0x1b, 0x3e, 0x16, 0x0e, 0x02, 0xa3, 0x43, - 0x98, 0xd6, 0x95, 0x2e, 0xa1, 0xa6, 0xd2, 0x22, 0xcd, 0x97, 0xf0, 0xe2, 0x73, 0x91, 0x7f, 0x40, - 0x89, 0x22, 0xe2, 0xb8, 0x12, 0xb4, 0x0e, 0x45, 0xd5, 0xe4, 0x25, 0xa6, 0xa8, 0x25, 0x06, 0xe6, - 0x61, 0xa7, 0x20, 0x75, 0xe2, 0xb9, 0xf8, 0x81, 0x5d, 0x0c, 0xf9, 0x5f, 0xa3, 0xde, 0xc0, 0x2b, - 0x96, 0xc7, 0x50, 0xe2, 0x9d, 0x26, 0x2d, 0x43, 0x73, 0x3f, 0x75, 0xf0, 0xcb, 0x85, 0x18, 0x7b, - 0x71, 0x54, 0xb9, 0x9c, 0xf0, 0x8a, 0xed, 0x92, 0xb1, 0x27, 0x8c, 0x36, 0xa0, 0x60, 0xfe, 0x90, - 0xe2, 0x8a, 0xa7, 0x49, 0x5e, 0x51, 0x71, 0x1c, 0xf4, 0x6b, 0x50, 0x24, 0x7a, 0x9b, 0xd7, 0x6b, - 0xce, 0x3b, 0x02, 0x9f, 0xd5, 0x23, 0x67, 0x08, 0xbb, 0x34, 0xf9, 0x0f, 0xf2, 0x91, 0x59, 0xf1, - 0x9c, 0xfa, 0xec, 0xa5, 0x39, 0x87, 0x57, 0xf3, 0xa5, 0x3a, 0xc8, 0x0e, 0x14, 0x45, 0x46, 0x16, - 0x3e, 0xff, 0xf3, 0xd3, 0xf8, 0x7c, 0x30, 0xd9, 0x79, 0x57, 0x2e, 0x77, 0xd0, 0x05, 0x46, 0x1f, - 0xc2, 0x30, 0x71, 0x54, 0x38, 0x29, 0xf4, 0xde, 0x69, 0x54, 0xf8, 0xe1, 0xd7, 0x2f, 0xb5, 0xc5, - 0x98, 0x40, 0x45, 0xbf, 0x64, 0xeb, 0xc5, 0x78, 0x59, 0x65, 0x4a, 0xcb, 0x05, 0x9e, 0xd5, 0xae, - 0x38, 0xd3, 0xf6, 0x86, 0x5f, 0x1c, 0x55, 0xc0, 0xff, 0x89, 0x83, 0x12, 0xf2, 0x3f, 0x4b, 0x30, - 0xcd, 0x57, 0xa8, 0xd5, 0xb3, 0x54, 0xbb, 0x7f, 0x6e, 0xf9, 0xeb, 0x69, 0x28, 0x7f, 0xdd, 0x19, - 0xb0, 0x2c, 0x31, 0x0b, 0x53, 0x73, 0xd8, 0xd7, 0x12, 0x5c, 0x8c, 0x71, 0x9f, 0x43, 0xf8, 0xdc, - 0x0e, 0x87, 0xcf, 0x37, 0x4f, 0x3b, 0xa1, 0x94, 0x10, 0xfa, 0x5f, 0xd3, 0x09, 0xd3, 0xe1, 0x27, - 0xe5, 0x36, 0x80, 0x69, 0xa9, 0x07, 0xaa, 0x46, 0x3a, 0xe2, 0xe3, 0x7f, 0x29, 0xd0, 0xda, 0xe5, - 0x51, 0x70, 0x80, 0x0b, 0x51, 0x98, 0x6b, 0x93, 0x5d, 0xa5, 0xa7, 0xd9, 0x8b, 0xed, 0xf6, 0x92, - 0x62, 0x2a, 0x3b, 0xaa, 0xa6, 0xda, 0xaa, 0x78, 0x26, 0x19, 0xa9, 0x3f, 0x74, 0x3e, 0xca, 0x27, - 0x71, 0xbc, 0x38, 0xaa, 0x5c, 0x49, 0xfa, 0x2a, 0xe6, 0xb2, 0xf4, 0x71, 0x0a, 0x34, 0xea, 0x43, - 0xd9, 0x22, 0x1f, 0xf7, 0x54, 0x8b, 0xb4, 0x97, 0x2d, 0xc3, 0x0c, 0xa9, 0xcd, 0x73, 0xb5, 0xbf, - 0x75, 0x7c, 0x54, 0x29, 0xe3, 0x14, 0x9e, 0xc1, 0x8a, 0x53, 0xe1, 0xd1, 0x33, 0x98, 0x51, 0x44, - 0x13, 0x5e, 0x50, 0xab, 0x73, 0x4a, 0xee, 0x1f, 0x1f, 0x55, 0x66, 0x16, 0xe3, 0xe4, 0xc1, 0x0a, - 0x93, 0x40, 0x51, 0x0d, 0x8a, 0x07, 0xbc, 0x5f, 0x8f, 0x96, 0x87, 0x38, 0x3e, 0xcb, 0x17, 0x45, - 0xa7, 0x85, 0x8f, 0x61, 0x0e, 0xaf, 0x34, 0xf9, 0xe9, 0x73, 0xb9, 0xd8, 0x95, 0x98, 0x95, 0x9c, - 0xe2, 0xc4, 0xf3, 0x97, 0xf2, 0x92, 0x1f, 0xb5, 0x9e, 0xf8, 0x24, 0x1c, 0xe4, 0x43, 0x1f, 0xc0, - 0xc8, 0x9e, 0x78, 0x57, 0xa1, 0xe5, 0x62, 0xa6, 0x5c, 0x1d, 0x7a, 0x87, 0xa9, 0x4f, 0x0b, 0x15, - 0x23, 0xee, 0x30, 0xc5, 0x3e, 0x22, 0x7a, 0x1d, 0x8a, 0xfc, 0xc7, 0xea, 0x32, 0x7f, 0x86, 0x2c, - 0xf9, 0xb1, 0xed, 0x89, 0x33, 0x8c, 0x5d, 0xba, 0xcb, 0xba, 0xda, 0x58, 0xe2, 0xcf, 0xe1, 0x11, - 0xd6, 0xd5, 0xc6, 0x12, 0x76, 0xe9, 0xe8, 0x23, 0x28, 0x52, 0xb2, 0xa6, 0xea, 0xbd, 0xc3, 
0x32, - 0x64, 0xfa, 0x98, 0xde, 0x7c, 0xc4, 0xb9, 0x23, 0x0f, 0x82, 0xbe, 0x06, 0x41, 0xc7, 0x2e, 0x2c, - 0xda, 0x83, 0x11, 0xab, 0xa7, 0x2f, 0xd2, 0x6d, 0x4a, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, - 0x63, 0x97, 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, - 0x6f, 0x8f, 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x4f, 0x9c, 0xe2, 0xfb, 0x83, 0x47, - 0xc6, 0x01, 0x6c, 0xf4, 0x87, 0x12, 0x20, 0xda, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x6e, 0x2b, 0x1a, - 0x1f, 0xa5, 0xe5, 0x31, 0xae, 0xf2, 0xed, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0x3e, 0x2b, - 0xc4, 0x59, 0x71, 0x82, 0x5e, 0xb6, 0x89, 0xbb, 0x62, 0xd6, 0xe3, 0x99, 0x36, 0x31, 0xf9, 0x55, - 0xd7, 0xdf, 0x44, 0x41, 0xc7, 0x2e, 0x2c, 0x7a, 0x0a, 0x73, 0x6e, 0x63, 0x29, 0x36, 0x0c, 0x7b, - 0x45, 0xd5, 0x08, 0xed, 0x53, 0x9b, 0x74, 0xcb, 0x13, 0xdc, 0xc1, 0xbc, 0xee, 0x1a, 0x9c, 0xc8, - 0x85, 0x53, 0xa4, 0x51, 0x17, 0x2a, 0x6e, 0x70, 0x62, 0x27, 0xd7, 0x8b, 0x8e, 0x8f, 0x68, 0x4b, - 0xd1, 0x9c, 0x2f, 0x2d, 0x93, 0x5c, 0xc1, 0x6b, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, - 0x10, 0x16, 0x7a, 0x0f, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0xa7, 0x2c, 0xe2, 0xa5, 0x2a, - 0x48, 0x95, 0x46, 0x36, 0x4c, 0x29, 0xe1, 0x16, 0x5f, 0x5a, 0x9e, 0xce, 0xf4, 0x68, 0x1b, 0xe9, - 0x0c, 0xf6, 0xdf, 0x57, 0x22, 0x04, 0x8a, 0x63, 0x1a, 0xd0, 0xef, 0x01, 0x52, 0xa2, 0x5d, 0xc9, - 0xb4, 0x8c, 0x32, 0x25, 0xba, 0x58, 0x3b, 0xb3, 0xef, 0x76, 0x31, 0x12, 0xc5, 0x09, 0x7a, 0x58, - 0x1d, 0xaf, 0x44, 0x3a, 0xa9, 0x69, 0x79, 0x9e, 0x2b, 0xaf, 0x65, 0x53, 0xee, 0xc9, 0x05, 0x3e, - 0x28, 0x45, 0x11, 0x71, 0x5c, 0x09, 0x5a, 0x83, 0x59, 0x31, 0xb8, 0xad, 0x53, 0x65, 0x97, 0x34, - 0xfb, 0xb4, 0x65, 0x6b, 0xb4, 0x3c, 0xc3, 0xe3, 0x3b, 0xff, 0xa8, 0xb9, 0x98, 0x40, 0xc7, 0x89, - 0x52, 0xe8, 0x6d, 0x98, 0xda, 0x35, 0xac, 0x1d, 0xb5, 0xdd, 0x26, 0xba, 0x8b, 0x34, 0xcb, 0x91, - 0xf8, 0x73, 0xd1, 0x4a, 0x84, 0x86, 0x63, 0xdc, 0x88, 0xc2, 0x45, 0x81, 0xdc, 0xb0, 0x8c, 0xd6, - 0xba, 0xd1, 0xd3, 0x6d, 0xa7, 0xec, 0xbb, 0xe8, 0xa5, 0xd1, 0x8b, 0x8b, 0x49, 0x0c, 0x2f, 0x8e, - 0x2a, 0x57, 0x93, 0x2f, 0x03, 0x3e, 0x13, 0x4e, 0xc6, 0x46, 0x26, 0x8c, 0x89, 0xfe, 0x78, 0xfe, - 0x6e, 0x55, 0x2e, 0xf3, 0xa3, 0xff, 0x60, 0x70, 0xc0, 0xf3, 0x44, 0xa2, 0xe7, 0x7f, 0xea, 0xf8, - 0xa8, 0x32, 0x16, 0x64, 0xc0, 0x21, 0x0d, 0xbc, 0x1f, 0x4a, 0x7c, 0x85, 0x3b, 0x9f, 0x9e, 0xf2, - 0xd3, 0xf5, 0x43, 0xf9, 0xa6, 0xbd, 0xb4, 0x7e, 0xa8, 0x00, 0xe4, 0xc9, 0x2f, 0xeb, 0xff, 0x99, - 0x83, 0x19, 0x9f, 0x39, 0x73, 0x3f, 0x54, 0x82, 0xc8, 0xff, 0xf7, 0x95, 0x67, 0xeb, 0x51, 0xf2, - 0x97, 0xee, 0x7f, 0x5f, 0x8f, 0x92, 0x6f, 0x5b, 0xca, 0xed, 0xe1, 0xaf, 0x73, 0xc1, 0x09, 0x9c, - 0xb2, 0x51, 0xe6, 0x25, 0xb4, 0x56, 0xff, 0xe8, 0x7a, 0x6d, 0xe4, 0xaf, 0xf3, 0x30, 0x15, 0x3d, - 0x8d, 0xa1, 0x7e, 0x0a, 0x69, 0x60, 0x3f, 0x45, 0x03, 0x66, 0x77, 0x7b, 0x9a, 0xd6, 0xe7, 0x73, - 0x08, 0x34, 0x55, 0x38, 0x5f, 0x36, 0x7f, 0x2a, 0x24, 0x67, 0x57, 0x12, 0x78, 0x70, 0xa2, 0x64, - 0xbc, 0xbd, 0xa2, 0xf0, 0x43, 0xdb, 0x2b, 0x86, 0xce, 0xd0, 0x5e, 0x91, 0xdc, 0xa1, 0x92, 0x3f, - 0x53, 0x87, 0xca, 0x59, 0x7a, 0x2b, 0x12, 0x82, 0xd8, 0xc0, 0x3e, 0xe1, 0x5f, 0xc0, 0x44, 0xb8, - 0xdf, 0xc7, 0xd9, 0x4b, 0xa7, 0xe5, 0x48, 0x7c, 0x41, 0x0e, 0xec, 0xa5, 0x33, 0x8e, 0x3d, 0x0e, - 0xf9, 0x58, 0x82, 0xb9, 0xe4, 0xbe, 0x5e, 0xa4, 0xc1, 0x44, 0x57, 0x39, 0x0c, 0xf6, 0x5a, 0x4b, - 0x67, 0x7c, 0x40, 0xe3, 0x8d, 0x1e, 0xeb, 0x21, 0x2c, 0x1c, 0xc1, 0x46, 0xef, 0x43, 0xa9, 0xab, - 0x1c, 0x36, 0x7b, 0x56, 0x87, 0x9c, 0xf9, 0xa1, 0x8e, 0x1f, 0xa3, 0x75, 0x81, 0x82, 0x3d, 0x3c, - 0xf9, 0x7b, 0x09, 
0xe6, 0x53, 0xda, 0x37, 0xfe, 0x0f, 0xcd, 0xf2, 0x2f, 0x24, 0xf8, 0x49, 0xea, - 0x35, 0x0c, 0xdd, 0x0b, 0x75, 0x9a, 0xc8, 0x91, 0x4e, 0x13, 0x14, 0x17, 0x7c, 0x45, 0x8d, 0x26, - 0x9f, 0x4b, 0x50, 0x4e, 0xbb, 0x97, 0xa2, 0xbb, 0x21, 0x23, 0x7f, 0x16, 0x31, 0x72, 0x3a, 0x26, - 0xf7, 0x8a, 0x6c, 0xfc, 0x17, 0x09, 0x2e, 0x9f, 0x50, 0xdf, 0x79, 0xd7, 0x1f, 0xd2, 0x0e, 0x72, - 0xf1, 0x97, 0x73, 0xf1, 0xd9, 0xcd, 0xbf, 0xfe, 0x24, 0xf0, 0xe0, 0x54, 0x69, 0xb4, 0x0d, 0xf3, - 0xe2, 0xee, 0x15, 0xa5, 0x89, 0xd2, 0x85, 0x37, 0xe4, 0x2d, 0x27, 0xb3, 0xe0, 0x34, 0x59, 0xf9, - 0xaf, 0x24, 0x98, 0x4b, 0x7e, 0x70, 0x40, 0x6f, 0x85, 0x96, 0xbc, 0x12, 0x59, 0xf2, 0xc9, 0x88, - 0x94, 0x58, 0xf0, 0x0f, 0x61, 0x42, 0x3c, 0x4b, 0x08, 0x18, 0xe1, 0xcc, 0x72, 0x52, 0x76, 0x12, - 0x10, 0x6e, 0x71, 0xcc, 0x8f, 0x49, 0x78, 0x0c, 0x47, 0xd0, 0xe4, 0x4f, 0x73, 0x30, 0xd4, 0x6c, - 0x29, 0x1a, 0x39, 0x87, 0xda, 0xf8, 0x57, 0xa1, 0xda, 0x78, 0xd0, 0x7f, 0x75, 0xe3, 0x56, 0xa5, - 0x96, 0xc5, 0x38, 0x52, 0x16, 0xbf, 0x91, 0x09, 0xed, 0xe4, 0x8a, 0xf8, 0x37, 0x61, 0xc4, 0x53, - 0x7a, 0xba, 0x44, 0x2d, 0xff, 0x79, 0x0e, 0x46, 0x03, 0x2a, 0x4e, 0x99, 0xe6, 0x77, 0x43, 0xb5, - 0x4d, 0x3e, 0xc3, 0x23, 0x50, 0x40, 0x57, 0xd5, 0xad, 0x66, 0x9c, 0x56, 0x6d, 0xbf, 0x39, 0x37, - 0x5e, 0xe4, 0xfc, 0x02, 0x26, 0x6c, 0xc5, 0xea, 0x10, 0xdb, 0xfb, 0x28, 0xe2, 0xb4, 0x92, 0x79, - 0xff, 0x67, 0x60, 0x2b, 0x44, 0xc5, 0x11, 0xee, 0x4b, 0x0f, 0x61, 0x3c, 0xa4, 0xec, 0x54, 0x9d, - 0xd6, 0x7f, 0x23, 0xc1, 0xcf, 0x06, 0x3e, 0x24, 0xa1, 0x7a, 0xe8, 0x90, 0x54, 0x23, 0x87, 0x64, - 0x21, 0x1d, 0xe0, 0xd5, 0x75, 0xec, 0xd5, 0x6f, 0x3e, 0xff, 0x6e, 0xe1, 0xc2, 0x37, 0xdf, 0x2d, - 0x5c, 0xf8, 0xf6, 0xbb, 0x85, 0x0b, 0xbf, 0x7f, 0xbc, 0x20, 0x3d, 0x3f, 0x5e, 0x90, 0xbe, 0x39, - 0x5e, 0x90, 0xbe, 0x3d, 0x5e, 0x90, 0xfe, 0xfd, 0x78, 0x41, 0xfa, 0xe3, 0xef, 0x17, 0x2e, 0xbc, - 0x5f, 0x14, 0x70, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x9d, 0xa0, 0x1e, 0x3d, 0x3f, 0x00, - 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + proto.RegisterFile("k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_90a532284de28347) +} + +var fileDescriptor_90a532284de28347 = []byte{ + // 2842 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3, + 0x46, 0x84, 0x4d, 0xd8, 0x9d, 0x61, 0x37, 0xc9, 0x92, 0x0f, 0x29, 0x61, 0xc7, 0xbb, 0xc9, 0x3a, + 0xb1, 0xc7, 0x93, 0x9a, 0x71, 0x82, 0x22, 0x02, 0xb4, 0x7b, 0xca, 0xe3, 0x8e, 0x7b, 0xba, 0x47, + 0xdd, 0x35, 0x66, 0x7d, 0x03, 0xc1, 0x25, 0x27, 0xb8, 0x04, 0x38, 0x22, 0x21, 0x71, 0xe5, 0xca, + 0x21, 0x44, 0x20, 0x82, 0xb4, 0x42, 0x1c, 0x22, 0x71, 0x20, 0x27, 0x8b, 0x38, 0x27, 0xc4, 0x3f, + 0x80, 0xf6, 0x84, 0xea, 0xa3, 0xab, 0xbf, 0xed, 0x1e, 0xe3, 0x58, 0x04, 0x71, 0x5a, 0x4f, 0xbd, + 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0xeb, 0xbb, 0xcf, 0xf9, 0x35, + 0xcb, 0xad, 0x1b, 0x03, 0xab, 0x4e, 0xee, 0x53, 0xe2, 0xf8, 0x96, 0xeb, 0xf8, 0xf5, 0xbd, 0x1b, + 0x5b, 0x84, 0x1a, 0x37, 0xea, 0x3d, 0xe2, 0x10, 0xcf, 0xa0, 0xa4, 0x5b, 0x1b, 0x78, 0x2e, 0x75, + 0xd1, 0x63, 0x42, 0xbd, 0x66, 0x0c, 0xac, 0x5a, 0xa8, 0x5e, 0x93, 0xea, 0x8b, 0xd7, 0x7b, 0x16, + 0xdd, 0x19, 0x6e, 0xd5, 0x4c, 0xb7, 0x5f, 0xef, 0xb9, 0x3d, 0xb7, 0xce, 0xad, 0xb6, 0x86, 0xdb, + 0xfc, 0x17, 0xff, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0x7a, 0xa4, 0x73, 0xd3, 0xf5, 0x48, 0x7d, 0x2f, + 0xd5, 0xe3, 0xe2, 0x33, 0xa1, 0x4e, 0xdf, 0x30, 0x77, 0x2c, 0x87, 0x78, 0xfb, 0xf5, 0xc1, 0x6e, + 0x8f, 0x35, 0xf8, 0xf5, 0x3e, 0xa1, 0x46, 0x96, 0x55, 0x3d, 0xcf, 0xca, 0x1b, 0x3a, 0xd4, 0xea, + 0x93, 0x94, 0xc1, 0xad, 0xe3, 0x0c, 0x7c, 0x73, 0x87, 0xf4, 0x8d, 0x94, 0xdd, 0xd3, 0x79, 0x76, + 0x43, 0x6a, 0xd9, 0x75, 0xcb, 0xa1, 0x3e, 0xf5, 0x92, 0x46, 0xfa, 0xfb, 0x25, 0x98, 0xbc, 0x63, + 0x90, 0xbe, 0xeb, 0xb4, 0x09, 0x45, 0xdf, 0x83, 0x2a, 0x1b, 0x46, 0xd7, 0xa0, 0xc6, 0x82, 0xf6, + 0xb8, 0x76, 0x75, 0xea, 0xe6, 0xd7, 0x6b, 0xe1, 0x34, 0x2b, 0xd4, 0xda, 0x60, 0xb7, 0xc7, 0x1a, + 0xfc, 0x1a, 0xd3, 0xae, 0xed, 0xdd, 0xa8, 0x6d, 0x6c, 0xbd, 0x4b, 0x4c, 0xba, 0x4e, 0xa8, 0xd1, + 0x40, 0x0f, 0x0e, 0x96, 0xcf, 0x1d, 0x1e, 0x2c, 0x43, 0xd8, 0x86, 0x15, 0x2a, 0x6a, 0xc2, 0x98, + 0x3f, 0x20, 0xe6, 0x42, 0x89, 0xa3, 0x5f, 0xab, 0x1d, 0xb9, 0x88, 0x35, 0xe5, 0x59, 0x7b, 0x40, + 0xcc, 0xc6, 0x79, 0x89, 0x3c, 0xc6, 0x7e, 0x61, 0x8e, 0x83, 0xde, 0x84, 0x71, 0x9f, 0x1a, 0x74, + 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1, 0x1b, + 0x4b, 0x34, 0xfd, 0x1f, 0x25, 0x40, 0x4a, 0x77, 0xc5, 0x75, 0xba, 0x16, 0xb5, 0x5c, 0x07, 0xbd, + 0x00, 0x63, 0x74, 0x7f, 0x40, 0xf8, 0xe4, 0x4c, 0x36, 0x9e, 0x08, 0x1c, 0xea, 0xec, 0x0f, 0xc8, + 0xc3, 0x83, 0xe5, 0xcb, 0x69, 0x0b, 0x26, 0xc1, 0xdc, 0x06, 0xad, 0x29, 0x57, 0x4b, 0xdc, 0xfa, + 0x99, 0x78, 0xd7, 0x0f, 0x0f, 0x96, 0x33, 0x82, 0xb0, 0xa6, 0x90, 0xe2, 0x0e, 0xa2, 0x3d, 0x40, + 0xb6, 0xe1, 0xd3, 0x8e, 0x67, 0x38, 0xbe, 0xe8, 0xc9, 0xea, 0x13, 0x39, 0x09, 0x4f, 0x15, 0x5b, + 0x34, 0x66, 0xd1, 0x58, 0x94, 0x5e, 0xa0, 0xb5, 0x14, 0x1a, 0xce, 
0xe8, 0x01, 0x3d, 0x01, 0xe3, + 0x1e, 0x31, 0x7c, 0xd7, 0x59, 0x18, 0xe3, 0xa3, 0x50, 0x13, 0x88, 0x79, 0x2b, 0x96, 0x52, 0xf4, + 0x24, 0x4c, 0xf4, 0x89, 0xef, 0x1b, 0x3d, 0xb2, 0x50, 0xe1, 0x8a, 0xb3, 0x52, 0x71, 0x62, 0x5d, + 0x34, 0xe3, 0x40, 0xae, 0x7f, 0xa0, 0xc1, 0xb4, 0x9a, 0xb9, 0x35, 0xcb, 0xa7, 0xe8, 0xdb, 0xa9, + 0x38, 0xac, 0x15, 0x1b, 0x12, 0xb3, 0xe6, 0x51, 0x78, 0x41, 0xf6, 0x56, 0x0d, 0x5a, 0x22, 0x31, + 0xb8, 0x0e, 0x15, 0x8b, 0x92, 0x3e, 0x5b, 0x87, 0xf2, 0xd5, 0xa9, 0x9b, 0x57, 0x8b, 0x86, 0x4c, + 0x63, 0x5a, 0x82, 0x56, 0x56, 0x99, 0x39, 0x16, 0x28, 0xfa, 0xcf, 0xc6, 0x22, 0xee, 0xb3, 0xd0, + 0x44, 0xef, 0x40, 0xd5, 0x27, 0x36, 0x31, 0xa9, 0xeb, 0x49, 0xf7, 0x9f, 0x2e, 0xe8, 0xbe, 0xb1, + 0x45, 0xec, 0xb6, 0x34, 0x6d, 0x9c, 0x67, 0xfe, 0x07, 0xbf, 0xb0, 0x82, 0x44, 0x6f, 0x40, 0x95, + 0x92, 0xfe, 0xc0, 0x36, 0x28, 0x91, 0xfb, 0xe8, 0xcb, 0xd1, 0x21, 0xb0, 0xc8, 0x61, 0x60, 0x2d, + 0xb7, 0xdb, 0x91, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0x7b, 0x30, 0x33, + 0x1c, 0x74, 0x99, 0x26, 0x65, 0xd9, 0xa1, 0xb7, 0x2f, 0x23, 0xe9, 0x56, 0xd1, 0xb9, 0xd9, 0x8c, + 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3, 0x7d, 0xcb, + 0xc1, 0xc4, 0xe8, 0xee, 0xb7, 0x89, 0xe9, 0x3a, 0x5d, 0x9f, 0x87, 0x55, 0xa5, 0x31, 0x2f, 0x01, + 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48, 0x6e, 0x96, + 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x49, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06, 0x73, 0x1e, + 0xd9, 0xb3, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0xfd, 0x35, 0xab, 0x6f, 0xd1, 0x85, 0x71, + 0xee, 0xd3, 0xc2, 0xe1, 0xc1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0xe7, 0xe3, 0x30, + 0x9b, 0xc8, 0x37, 0xe8, 0x4d, 0xb8, 0x6c, 0x0e, 0x3d, 0x8f, 0x38, 0xb4, 0x39, 0xec, 0x6f, 0x11, + 0xaf, 0x6d, 0xee, 0x90, 0xee, 0xd0, 0x26, 0x5d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4, 0xf8, 0xf2, + 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15, 0x66, 0x89, + 0x63, 0xaa, 0x59, 0x68, 0xa6, 0x34, 0x70, 0x86, 0x15, 0xf3, 0xb1, 0x4b, 0x7c, 0xcb, 0x23, 0xdd, + 0xa4, 0x8f, 0xe5, 0xb8, 0x8f, 0x77, 0x32, 0xb5, 0x70, 0x8e, 0x35, 0x7a, 0x16, 0xa6, 0x44, 0x6f, + 0x7c, 0xfd, 0xe4, 0x42, 0x5f, 0x92, 0x60, 0x53, 0xcd, 0x50, 0x84, 0xa3, 0x7a, 0x6c, 0x68, 0xee, + 0x96, 0x4f, 0xbc, 0x3d, 0xd2, 0xcd, 0x5f, 0xe0, 0x8d, 0x94, 0x06, 0xce, 0xb0, 0x62, 0x43, 0x13, + 0x11, 0x98, 0x1a, 0xda, 0x78, 0x7c, 0x68, 0x9b, 0x99, 0x5a, 0x38, 0xc7, 0x9a, 0xc5, 0xb1, 0x70, + 0xf9, 0xf6, 0x9e, 0x61, 0xd9, 0xc6, 0x96, 0x4d, 0x16, 0x26, 0xe2, 0x71, 0xdc, 0x8c, 0x8b, 0x71, + 0x52, 0x1f, 0xbd, 0x0a, 0x17, 0x45, 0xd3, 0xa6, 0x63, 0x28, 0x90, 0x2a, 0x07, 0x79, 0x54, 0x82, + 0x5c, 0x6c, 0x26, 0x15, 0x70, 0xda, 0x06, 0xbd, 0x00, 0x33, 0xa6, 0x6b, 0xdb, 0x3c, 0x1e, 0x57, + 0xdc, 0xa1, 0x43, 0x17, 0x26, 0x39, 0x0a, 0x62, 0xfb, 0x71, 0x25, 0x26, 0xc1, 0x09, 0x4d, 0x44, + 0x00, 0xcc, 0xa0, 0xe0, 0xf8, 0x0b, 0xc0, 0xf3, 0xe3, 0x8d, 0xa2, 0x39, 0x40, 0x95, 0xaa, 0x90, + 0x03, 0xa8, 0x26, 0x1f, 0x47, 0x80, 0xf5, 0x3f, 0x6b, 0x30, 0x9f, 0x93, 0x3a, 0xd0, 0xcb, 0xb1, + 0x12, 0xfb, 0xb5, 0x44, 0x89, 0xbd, 0x92, 0x63, 0x16, 0xa9, 0xb3, 0x0e, 0x4c, 0x7b, 0x6c, 0x54, + 0x4e, 0x4f, 0xa8, 0xc8, 0x1c, 0xf9, 0xec, 0x31, 0xc3, 0xc0, 0x51, 0x9b, 0x30, 0xe7, 0x5f, 0x3c, + 0x3c, 0x58, 0x9e, 0x8e, 0xc9, 0x70, 0x1c, 0x5e, 0xff, 0x45, 0x09, 0xe0, 0x0e, 0x19, 0xd8, 0xee, + 0x7e, 0x9f, 0x38, 0x67, 0xc1, 0xa1, 0x36, 0x62, 0x1c, 0xea, 0xfa, 0x71, 0xcb, 0xa3, 0x5c, 0xcb, + 0x25, 0x51, 0x6f, 0x25, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 
0x14, + 0x2a, 0x87, 0x34, 0xea, 0xc5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x9f, 0x1b, + 0x8f, 0x7a, 0x17, 0x66, 0x18, 0xcb, 0x11, 0x6b, 0xc9, 0x39, 0xd4, 0xf8, 0xc8, 0x1c, 0x4a, 0x55, + 0xbb, 0xb5, 0x18, 0x12, 0x4e, 0x20, 0xe7, 0x70, 0xb6, 0x89, 0x2f, 0x22, 0x67, 0xfb, 0x50, 0x83, + 0x99, 0x70, 0x99, 0xce, 0x80, 0xb4, 0x35, 0xe3, 0xa4, 0xed, 0xc9, 0xc2, 0x21, 0x9a, 0xc3, 0xda, + 0xfe, 0xc5, 0x08, 0xbe, 0x52, 0x62, 0x1b, 0x7c, 0xcb, 0x30, 0x77, 0xd1, 0xe3, 0x30, 0xe6, 0x18, + 0xfd, 0x20, 0x32, 0xd5, 0x66, 0x69, 0x1a, 0x7d, 0x82, 0xb9, 0x04, 0xbd, 0xaf, 0x01, 0x92, 0x55, + 0xe0, 0xb6, 0xe3, 0xb8, 0xd4, 0x10, 0xb9, 0x52, 0xb8, 0xb5, 0x5a, 0xd8, 0xad, 0xa0, 0xc7, 0xda, + 0x66, 0x0a, 0xeb, 0xae, 0x43, 0xbd, 0xfd, 0x70, 0x91, 0xd3, 0x0a, 0x38, 0xc3, 0x01, 0x64, 0x00, + 0x78, 0x12, 0xb3, 0xe3, 0xca, 0x8d, 0x7c, 0xbd, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d, + 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x85, 0xf9, 0x1c, 0x6f, 0xd1, 0x05, + 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91, + 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x42, 0xe9, 0x39, 0x4d, 0xff, 0xa0, 0x12, 0x8d, 0x1d, 0xce, 0x98, + 0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3, + 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0xcf, 0x97, 0x5b, 0x97, 0x4f, 0x87, 0x5b, 0x7f, 0x17, 0xaa, 0x7e, + 0xc0, 0xaa, 0xc7, 0x38, 0xe4, 0x8d, 0x11, 0xf2, 0xab, 0x24, 0xd4, 0xaa, 0x03, 0x45, 0xa5, 0x15, + 0x68, 0x16, 0x89, 0xae, 0x8c, 0x48, 0xa2, 0x4f, 0x95, 0xf8, 0xb2, 0x7c, 0x33, 0x30, 0x86, 0x3e, + 0xe9, 0xf2, 0xdc, 0x56, 0x0d, 0xf3, 0x4d, 0x8b, 0xb7, 0x62, 0x29, 0x45, 0xef, 0xc4, 0x42, 0xb6, + 0x7a, 0x92, 0x90, 0x9d, 0xc9, 0x0f, 0x57, 0xb4, 0x09, 0xf3, 0x03, 0xcf, 0xed, 0x79, 0xc4, 0xf7, + 0xef, 0x10, 0xa3, 0x6b, 0x5b, 0x0e, 0x09, 0xe6, 0x47, 0x30, 0xa2, 0x2b, 0x87, 0x07, 0xcb, 0xf3, + 0xad, 0x6c, 0x15, 0x9c, 0x67, 0xab, 0x3f, 0x18, 0x83, 0x0b, 0xc9, 0x0a, 0x98, 0x43, 0x52, 0xb5, + 0x13, 0x91, 0xd4, 0x6b, 0x91, 0xcd, 0x20, 0x18, 0xbc, 0x5a, 0xfd, 0x8c, 0x0d, 0x71, 0x1b, 0x66, + 0x65, 0x36, 0x08, 0x84, 0x92, 0xa6, 0xab, 0xd5, 0xdf, 0x8c, 0x8b, 0x71, 0x52, 0x1f, 0xbd, 0x08, + 0xd3, 0x1e, 0xe7, 0xdd, 0x01, 0x80, 0xe0, 0xae, 0x8f, 0x48, 0x80, 0x69, 0x1c, 0x15, 0xe2, 0xb8, + 0x2e, 0xe3, 0xad, 0x21, 0x1d, 0x0d, 0x00, 0xc6, 0xe2, 0xbc, 0xf5, 0x76, 0x52, 0x01, 0xa7, 0x6d, + 0xd0, 0x3a, 0x5c, 0x1a, 0x3a, 0x69, 0x28, 0x11, 0xca, 0x57, 0x24, 0xd4, 0xa5, 0xcd, 0xb4, 0x0a, + 0xce, 0xb2, 0x43, 0xdb, 0x31, 0x2a, 0x3b, 0xce, 0xd3, 0xf3, 0xcd, 0xc2, 0x1b, 0xaf, 0x30, 0x97, + 0xcd, 0xa0, 0xdb, 0xd5, 0xa2, 0x74, 0x5b, 0xff, 0x83, 0x16, 0x2d, 0x42, 0x8a, 0x02, 0x1f, 0x77, + 0xcb, 0x94, 0xb2, 0x88, 0xb0, 0x23, 0x37, 0x9b, 0xfd, 0xde, 0x1a, 0x89, 0xfd, 0x86, 0xc5, 0xf3, + 0x78, 0xfa, 0xfb, 0x47, 0x0d, 0x66, 0xef, 0x75, 0x3a, 0xad, 0x55, 0x87, 0xef, 0x96, 0x96, 0x41, + 0x77, 0x58, 0x15, 0x1d, 0x18, 0x74, 0x27, 0x59, 0x45, 0x99, 0x0c, 0x73, 0x09, 0x7a, 0x06, 0xaa, + 0xec, 0x5f, 0xe6, 0x38, 0x0f, 0xd7, 0x49, 0x9e, 0x64, 0xaa, 0x2d, 0xd9, 0xf6, 0x30, 0xf2, 0x37, + 0x56, 0x9a, 0xe8, 0x5b, 0x30, 0xc1, 0xf6, 0x36, 0x71, 0xba, 0x05, 0xc9, 0xaf, 0x74, 0xaa, 0x21, + 0x8c, 0x42, 0x3e, 0x23, 0x1b, 0x70, 0x00, 0xa7, 0xef, 0xc2, 0x5c, 0x64, 0x10, 0x78, 0x68, 0x93, + 0x37, 0x59, 0xbd, 0x42, 0x6d, 0xa8, 0xb0, 0xde, 0x59, 0x55, 0x2a, 0x17, 0xb8, 0x5e, 0x4c, 0x4c, + 0x44, 0xc8, 0x3d, 0xd8, 0x2f, 0x1f, 0x0b, 0x2c, 0x7d, 0x03, 0x26, 0x56, 0x5b, 0x0d, 0xdb, 0x15, + 0x7c, 0xc3, 0xb4, 0xba, 0x5e, 0x72, 0xa6, 0x56, 0x56, 0xef, 0x60, 0xcc, 0x25, 0x48, 0x87, 0x71, + 0x72, 0xdf, 0x24, 
0x03, 0xca, 0x29, 0xc6, 0x64, 0x03, 0x58, 0x22, 0xbd, 0xcb, 0x5b, 0xb0, 0x94, + 0xe8, 0x3f, 0x29, 0xc1, 0x84, 0xec, 0xf6, 0x0c, 0xce, 0x1f, 0x6b, 0xb1, 0xf3, 0xc7, 0x53, 0xc5, + 0x96, 0x20, 0xf7, 0xf0, 0xd1, 0x49, 0x1c, 0x3e, 0xae, 0x15, 0xc4, 0x3b, 0xfa, 0xe4, 0xf1, 0x5e, + 0x09, 0x66, 0xe2, 0x8b, 0x8f, 0x9e, 0x85, 0x29, 0x96, 0x6a, 0x2d, 0x93, 0x34, 0x43, 0x86, 0xa7, + 0xae, 0x1f, 0xda, 0xa1, 0x08, 0x47, 0xf5, 0x50, 0x4f, 0x99, 0xb5, 0x5c, 0x8f, 0xca, 0x41, 0xe7, + 0x4f, 0xe9, 0x90, 0x5a, 0x76, 0x4d, 0x5c, 0xb6, 0xd7, 0x56, 0x1d, 0xba, 0xe1, 0xb5, 0xa9, 0x67, + 0x39, 0xbd, 0x54, 0x47, 0x0c, 0x0c, 0x47, 0x91, 0xd1, 0x5b, 0x2c, 0xed, 0xfb, 0xee, 0xd0, 0x33, + 0x49, 0x16, 0x7d, 0x0b, 0xa8, 0x07, 0xdb, 0x08, 0xdd, 0x35, 0xd7, 0x34, 0x6c, 0xb1, 0x38, 0x98, + 0x6c, 0x13, 0x8f, 0x38, 0x26, 0x09, 0x28, 0x93, 0x80, 0xc0, 0x0a, 0x4c, 0xff, 0xad, 0x06, 0x53, + 0x72, 0x2e, 0xce, 0x80, 0xa8, 0xbf, 0x1e, 0x27, 0xea, 0x4f, 0x14, 0xdc, 0xa1, 0xd9, 0x2c, 0xfd, + 0x77, 0x1a, 0x2c, 0x06, 0xae, 0xbb, 0x46, 0xb7, 0x61, 0xd8, 0x86, 0x63, 0x12, 0x2f, 0x88, 0xf5, + 0x45, 0x28, 0x59, 0x03, 0xb9, 0x92, 0x20, 0x01, 0x4a, 0xab, 0x2d, 0x5c, 0xb2, 0x06, 0xac, 0x8a, + 0xee, 0xb8, 0x3e, 0xe5, 0x6c, 0x5e, 0x1c, 0x14, 0x95, 0xd7, 0xf7, 0x64, 0x3b, 0x56, 0x1a, 0x68, + 0x13, 0x2a, 0x03, 0xd7, 0xa3, 0xac, 0x72, 0x95, 0x13, 0xeb, 0x7b, 0x84, 0xd7, 0x6c, 0xdd, 0x64, + 0x20, 0x86, 0x3b, 0x9d, 0xc1, 0x60, 0x81, 0xa6, 0xff, 0x50, 0x83, 0x47, 0x33, 0xfc, 0x97, 0xa4, + 0xa1, 0x0b, 0x13, 0x96, 0x10, 0xca, 0xf4, 0xf2, 0x7c, 0xb1, 0x6e, 0x33, 0xa6, 0x22, 0x4c, 0x6d, + 0x41, 0x0a, 0x0b, 0xa0, 0xf5, 0x5f, 0x69, 0x70, 0x31, 0xe5, 0x2f, 0x4f, 0xd1, 0x2c, 0x9e, 0x25, + 0xdb, 0x56, 0x29, 0x9a, 0x85, 0x25, 0x97, 0xa0, 0xd7, 0xa1, 0xca, 0xdf, 0x88, 0x4c, 0xd7, 0x96, + 0x13, 0x58, 0x0f, 0x26, 0xb0, 0x25, 0xdb, 0x1f, 0x1e, 0x2c, 0x5f, 0xc9, 0x38, 0x6b, 0x07, 0x62, + 0xac, 0x00, 0xd0, 0x32, 0x54, 0x88, 0xe7, 0xb9, 0x9e, 0x4c, 0xf6, 0x93, 0x6c, 0xa6, 0xee, 0xb2, + 0x06, 0x2c, 0xda, 0xf5, 0x5f, 0x87, 0x41, 0xca, 0xb2, 0x2f, 0xf3, 0x8f, 0x2d, 0x4e, 0x32, 0x31, + 0xb2, 0xa5, 0xc3, 0x5c, 0x82, 0x86, 0x70, 0xc1, 0x4a, 0xa4, 0x6b, 0xb9, 0x3b, 0xeb, 0xc5, 0xa6, + 0x51, 0x99, 0x35, 0x16, 0x24, 0xfc, 0x85, 0xa4, 0x04, 0xa7, 0xba, 0xd0, 0x09, 0xa4, 0xb4, 0xd0, + 0x1b, 0x30, 0xb6, 0x43, 0xe9, 0x20, 0xe3, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5, 0xa3, + 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2f, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0x6f, 0xaa, 0xd1, + 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7, 0xb4, + 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0xf7, 0xa0, 0x4c, 0xed, + 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d, 0xcc, + 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06, 0xc3, + 0xcd, 0xc7, 0x7e, 0xf9, 0x58, 0xe0, 0xe8, 0x3f, 0xd2, 0x60, 0x3a, 0x56, 0x2d, 0x90, 0x07, 0xe7, + 0xed, 0xc8, 0xde, 0x91, 0xf3, 0xf0, 0xdc, 0xe8, 0xbb, 0x4e, 0x6e, 0xfa, 0x39, 0xd9, 0xef, 0xf9, + 0xa8, 0x0c, 0xc7, 0xfa, 0xd0, 0x0d, 0x80, 0x70, 0xd8, 0x6c, 0x1f, 0xb0, 0xe0, 0x15, 0x1b, 0x5e, + 0xee, 0x03, 0x16, 0xd3, 0x3e, 0x16, 0xed, 0xe8, 0x26, 0x80, 0x4f, 0x4c, 0x8f, 0xd0, 0x66, 0x98, + 0xb8, 0x54, 0x39, 0x6e, 0x2b, 0x09, 0x8e, 0x68, 0xe9, 0x7f, 0xd2, 0x60, 0xba, 0x49, 0xe8, 0xf7, + 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71, 0xf9, + 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0, 0x3f, 0xd4, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a, + 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c, 0x86, 
0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d, + 0x19, 0xbd, 0xa3, 0x61, 0x12, 0xe2, 0x85, 0x35, 0xa7, 0xe3, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c, + 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x73, 0x1a, 0x01, 0x86, 0xb1, 0x6d, 0xcf, 0xed, 0x9f, 0x78, 0x0c, + 0x6a, 0x21, 0x5e, 0xf1, 0xdc, 0x3e, 0xe6, 0x58, 0xfa, 0x47, 0x1a, 0x5c, 0x8c, 0x69, 0x9e, 0x01, + 0x6f, 0x78, 0x23, 0xce, 0x1b, 0xae, 0x8d, 0x32, 0x90, 0x1c, 0xf6, 0xf0, 0x51, 0x29, 0x31, 0x0c, + 0x36, 0x60, 0xb4, 0x0d, 0x53, 0x03, 0xb7, 0xdb, 0x3e, 0x85, 0x07, 0xda, 0x59, 0xc6, 0xe7, 0x5a, + 0x21, 0x16, 0x8e, 0x02, 0xa3, 0xfb, 0x70, 0x91, 0x51, 0x0b, 0x7f, 0x60, 0x98, 0xa4, 0x7d, 0x0a, + 0x57, 0x56, 0x8f, 0xf0, 0x17, 0xa0, 0x24, 0x22, 0x4e, 0x77, 0x82, 0xd6, 0x61, 0xc2, 0x1a, 0xf0, + 0xf3, 0x85, 0x24, 0x92, 0xc7, 0x92, 0x30, 0x71, 0x1a, 0x11, 0x29, 0x5e, 0xfe, 0xc0, 0x01, 0x86, + 0xfe, 0xd7, 0x64, 0x34, 0x70, 0xba, 0xfa, 0x6a, 0x84, 0x1e, 0xc8, 0xb7, 0x9a, 0x93, 0x51, 0x83, + 0xa6, 0x64, 0x22, 0x27, 0x65, 0xd6, 0xd5, 0x04, 0x6f, 0xf9, 0x0a, 0x4c, 0x10, 0xa7, 0xcb, 0xc9, + 0xba, 0xb8, 0x08, 0xe1, 0xa3, 0xba, 0x2b, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x5c, 0x4e, 0x8c, 0x8a, + 0x97, 0xd9, 0x77, 0x4f, 0x2d, 0x38, 0x14, 0xe1, 0xcf, 0x0d, 0x90, 0xad, 0x90, 0xfe, 0x89, 0x98, + 0xff, 0xc6, 0x28, 0x31, 0x1f, 0xad, 0x7f, 0xb9, 0xe4, 0x0f, 0x7d, 0x07, 0xc6, 0x89, 0xe8, 0x42, + 0x54, 0xd5, 0x5b, 0xa3, 0x74, 0x11, 0xa6, 0xdf, 0xf0, 0x9c, 0x25, 0xdb, 0x24, 0x2a, 0x7a, 0x99, + 0xcd, 0x17, 0xd3, 0x65, 0xc7, 0x12, 0xc1, 0x9e, 0x27, 0x1b, 0x8f, 0x89, 0x61, 0xab, 0xe6, 0x87, + 0x07, 0xcb, 0x10, 0xfe, 0xc4, 0x51, 0x0b, 0xfe, 0x7a, 0x26, 0xef, 0x6c, 0xce, 0xe6, 0x0b, 0xa4, + 0xd1, 0x5e, 0xcf, 0x42, 0xd7, 0x4e, 0xed, 0xf5, 0x2c, 0x02, 0x79, 0xf4, 0x19, 0xf6, 0x9f, 0x25, + 0xb8, 0x14, 0x2a, 0x17, 0x7e, 0x3d, 0xcb, 0x30, 0xf9, 0xff, 0x57, 0x48, 0xc5, 0x5e, 0xb4, 0xc2, + 0xa9, 0xfb, 0xef, 0x7b, 0xd1, 0x0a, 0x7d, 0xcb, 0xa9, 0x76, 0xbf, 0x29, 0x45, 0x07, 0x30, 0xe2, + 0xb3, 0xca, 0x29, 0x7c, 0x88, 0xf3, 0x85, 0x7b, 0x99, 0xd1, 0xff, 0x52, 0x86, 0x0b, 0xc9, 0xdd, + 0x18, 0xbb, 0x7d, 0xd7, 0x8e, 0xbd, 0x7d, 0x6f, 0xc1, 0xdc, 0xf6, 0xd0, 0xb6, 0xf7, 0xf9, 0x18, + 0x22, 0x57, 0xf0, 0xe2, 0xde, 0xfe, 0x4b, 0xd2, 0x72, 0xee, 0x95, 0x0c, 0x1d, 0x9c, 0x69, 0x99, + 0xbe, 0x8c, 0x1f, 0xfb, 0x4f, 0x2f, 0xe3, 0x2b, 0x27, 0xb8, 0x8c, 0xcf, 0x7e, 0xcf, 0x28, 0x9f, + 0xe8, 0x3d, 0xe3, 0x24, 0x37, 0xf1, 0x19, 0x49, 0xec, 0xd8, 0xaf, 0x4a, 0x5e, 0x82, 0x99, 0xf8, + 0xeb, 0x90, 0x58, 0x4b, 0xf1, 0x40, 0x25, 0xdf, 0x62, 0x22, 0x6b, 0x29, 0xda, 0xb1, 0xd2, 0xd0, + 0x0f, 0x35, 0xb8, 0x9c, 0xfd, 0x15, 0x08, 0xb2, 0x61, 0xa6, 0x6f, 0xdc, 0x8f, 0x7e, 0x99, 0xa3, + 0x9d, 0x90, 0xad, 0xf0, 0x67, 0x81, 0xf5, 0x18, 0x16, 0x4e, 0x60, 0xa3, 0xb7, 0xa1, 0xda, 0x37, + 0xee, 0xb7, 0x87, 0x5e, 0x8f, 0x9c, 0x98, 0x15, 0xf1, 0x6d, 0xb4, 0x2e, 0x51, 0xb0, 0xc2, 0xd3, + 0x3f, 0xd3, 0x60, 0x3e, 0xe7, 0xb2, 0xff, 0x7f, 0x68, 0x94, 0xef, 0x95, 0xa0, 0xd2, 0x36, 0x0d, + 0x9b, 0x9c, 0x01, 0xa1, 0x78, 0x2d, 0x46, 0x28, 0x8e, 0xfb, 0x9a, 0x94, 0x7b, 0x95, 0xcb, 0x25, + 0x70, 0x82, 0x4b, 0x3c, 0x55, 0x08, 0xed, 0x68, 0x1a, 0xf1, 0x3c, 0x4c, 0xaa, 0x4e, 0x47, 0xcb, + 0x6e, 0xfa, 0x2f, 0x4b, 0x30, 0x15, 0xe9, 0x62, 0xc4, 0xdc, 0xb8, 0x1d, 0x2b, 0x08, 0xe5, 0x02, + 0x37, 0x2d, 0x91, 0xbe, 0x6a, 0x41, 0x09, 0x10, 0x5f, 0x43, 0x84, 0xef, 0xdf, 0xe9, 0xca, 0xf0, + 0x12, 0xcc, 0x50, 0xc3, 0xeb, 0x11, 0xaa, 0x68, 0xbb, 0xb8, 0x64, 0x54, 0x9f, 0xe5, 0x74, 0x62, + 0x52, 0x9c, 0xd0, 0x5e, 0x7c, 0x11, 0xa6, 0x63, 0x9d, 0x8d, 0xf2, 0x31, 0x43, 0x63, 0xe5, 0xc1, + 0xa7, 0x4b, 0xe7, 0x3e, 0xfe, 0x74, 0xe9, 0xdc, 0x27, 0x9f, 0x2e, 
0x9d, 0xfb, 0xc1, 0xe1, 0x92, + 0xf6, 0xe0, 0x70, 0x49, 0xfb, 0xf8, 0x70, 0x49, 0xfb, 0xe4, 0x70, 0x49, 0xfb, 0xfb, 0xe1, 0x92, + 0xf6, 0xd3, 0xcf, 0x96, 0xce, 0xbd, 0xfd, 0xd8, 0x91, 0xff, 0xb7, 0xe1, 0xdf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x5f, 0xd8, 0x14, 0x50, 0xfb, 0x30, 0x00, 0x00, } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { @@ -2757,48 +2288,6 @@ func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2881,64 +2370,6 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IDRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3126,7 +2557,7 @@ func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressRule) Marshal() (dAtA []byte, err error) { +func (m *IngressLoadBalancerIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3136,35 +2567,44 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressRule) MarshalTo(dAtA []byte) (int, 
error) { +func (m *IngressLoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IngressLoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) i-- dAtA[i] = 0x12 - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { +func (m *IngressLoadBalancerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3174,32 +2614,34 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { +func (m *IngressLoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IngressLoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.HTTP != nil { - { - size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *IngressSpec) Marshal() (dAtA []byte, err error) { +func (m *IngressPortStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3209,40 +2651,151 @@ func (m *IngressSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *IngressPortStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IngressPortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.IngressClassName != nil { - i -= len(*m.IngressClassName) - copy(dAtA[i:], *m.IngressClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1a } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *IngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.TLS) > 0 { - for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { - { + return dAtA[:n], nil +} + +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IngressClassName != nil { + i -= len(*m.IngressClassName) + copy(dAtA[i:], *m.IngressClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.TLS) > 0 { + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err @@ -3707,7 +3260,7 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3717,16 +3270,26 @@ func (m 
*PodSecurityPolicy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -3750,7 +3313,60 @@ func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3760,12 +3376,12 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -3797,7 +3413,7 @@ func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3807,49 +3423,32 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.RunAsGroup != nil { + i-- + dAtA[i] = 0x1a + if m.Selector != nil { { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3857,47 +3456,40 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } + dAtA[i] = 0x12 } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3905,183 +3497,148 @@ func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - 
} - } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x78 - } - i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x32 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := 
m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { +func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4091,12 +3648,12 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -4134,7 +3691,7 @@ func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } 
-func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4144,50 +3701,23 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4197,653 +3727,695 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x12 } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i = encodeVarintGenerated(dAtA, i, 
uint64(m.Replicas)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return base +} +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return n } -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + 
sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i 
= encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n } -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] 
= 0x12 + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if 
m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa + return n +} + +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil + return n } -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressPortStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return 
n } -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := 
i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return n } -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 } - dAtA[offset] = uint8(v) - return base + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (m *AllowedCSIDriver) Size() (n int) { + +func (m *NetworkPolicyPeer) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *AllowedFlexVolume) Size() (n int) { +func (m *NetworkPolicyPort) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EndPort != nil { + n += 1 + sovGenerated(uint64(*m.EndPort)) + } return n } -func (m *AllowedHostPath) Size() (n int) { +func (m *NetworkPolicySpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.PathPrefix) + l = m.PodSelector.Size() n += 1 + l + sovGenerated(uint64(l)) 
- n += 2 + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *DaemonSet) Size() (n int) { +func (m *ReplicaSet) Size() (n int) { if m == nil { return 0 } @@ -4858,7 +4430,7 @@ func (m *DaemonSet) Size() (n int) { return n } -func (m *DaemonSetCondition) Size() (n int) { +func (m *ReplicaSetCondition) Size() (n int) { if m == nil { return 0 } @@ -4877,7 +4449,7 @@ func (m *DaemonSetCondition) Size() (n int) { return n } -func (m *DaemonSetList) Size() (n int) { +func (m *ReplicaSetList) Size() (n int) { if m == nil { return 0 } @@ -4894,45 +4466,36 @@ func (m *DaemonSetList) Size() (n int) { return n } -func (m *DaemonSetSpec) Size() (n int) { +func (m *ReplicaSetSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } if m.Selector != nil { l = m.Selector.Size() n += 1 + l + sovGenerated(uint64(l)) } l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - n += 1 + sovGenerated(uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } return n } -func (m *DaemonSetStatus) Size() (n int) { +func (m *ReplicaSetStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { for _, e := range m.Conditions { l = e.Size() @@ -4942,3103 +4505,772 @@ func (m *DaemonSetStatus) Size() (n int) { return n } -func (m *DaemonSetUpdateStrategy) Size() (n int) { +func (m *RollbackConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Deployment) Size() (n int) { +func (m *RollingUpdateDeployment) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxUnavailable != nil { + l = 
m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *DeploymentCondition) Size() (n int) { +func (m *Scale) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentList) Size() (n int) { +func (m *ScaleSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + n += 1 + sovGenerated(uint64(m.Replicas)) return n } -func (m *DeploymentRollback) Size() (n int) { +func (m *ScaleStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { _ = k _ = v mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - l = m.RollbackTo.Size() + l = len(m.TargetSelector) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DeploymentSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *DeploymentStatus) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s } - -func (m *DeploymentStrategy) Size() (n int) { - if m == nil { - return 0 +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + 
`Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *FSGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s } - -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 +func (this *Deployment) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IDRange) Size() (n int) { - if m == nil { - return 0 + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) } - 
var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) } - return n + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *Ingress) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s } - -func (m *IngressBackend) Size() (n int) { - if m == nil { - return 0 +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + 
valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s } - -func (m *IngressList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s } - -func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s } - -func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } - return n + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s } - -func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 +func (this *IPBlock) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", 
this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressList) String() string { + if this == nil { + return "nil" } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s } - -func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *IngressRule) String() string { + if this == nil { + return "nil" } - if len(m.To) > 0 { - for _, e := range m.To { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return 
s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 +func (this *IngressSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," } - return n + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 +func (this *IngressStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPeer) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - if m.IPBlock != nil { - l = m.IPBlock.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPorts += "}" + repeatedStringForTo := 
"[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - return n + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," } - if m.EndPort != nil { - n += 1 + sovGenerated(uint64(*m.EndPort)) + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," } - return n + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s } - -func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Egress) > 0 { - for _, e := range m.Egress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for 
_, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 3 - } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," } - return n + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", 
"v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," } - return n + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," } - return n + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s } - -func (m *RollbackConfig) Size() (n int) { - if m == nil { - return 0 +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n -} - -func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + 
sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") return s } -func (this *DaemonSetStatus) String() string { +func (this *RollingUpdateDaemonSet) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, - `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, - `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Deployment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentRollback) String() string { - if this == nil { - return "nil" - } - keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) - for k := range this.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - mapStringForUpdatedAnnotations := "map[string]string{" - for _, k := range keysForUpdatedAnnotations { - mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) - } - mapStringForUpdatedAnnotations += "}" - s := strings.Join([]string{`&DeploymentRollback{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, - `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", 
this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," - } - repeatedStringForPaths += "}" - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + repeatedStringForPaths + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IPBlock) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPBlock{`, - `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, - `Except:` + fmt.Sprintf("%v", this.Except) + `,`, - `}`, - }, "") - return s -} -func (this *Ingress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*IngressBackend) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Ingress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) 
+ `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyEgressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForTo := "[]NetworkPolicyPeer{" - for _, f := range this.To { - repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForTo += "}" - s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `To:` + repeatedStringForTo + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyIngressRule) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForFrom := "[]NetworkPolicyPeer{" - for _, f := range this.From { - repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForFrom += "}" - s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `From:` + repeatedStringForFrom + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]NetworkPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPeer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPort{`, - `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, - `EndPort:` + valueToStringGenerated(this.EndPort) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForIngress := "[]NetworkPolicyIngressRule{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - repeatedStringForEgress := "[]NetworkPolicyEgressRule{" - for _, f := range this.Egress { - 
repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForEgress += "}" - s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + repeatedStringForIngress + `,`, - `Egress:` + repeatedStringForEgress + `,`, - `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + 
strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), 
"LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) 
+ `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) - } - m.TemplateGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TemplateGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift - if b 
< 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } -func (m *Deployment) Unmarshal(dAtA []byte) error { +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8061,10 +5293,10 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -8187,7 +5419,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8210,10 +5442,10 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -8246,7 +5478,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -8267,202 +5499,22 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { - return io.ErrUnexpectedEOF + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8489,15 +5541,15 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8507,25 +5559,55 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8548,7 +5630,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8571,47 +5653,15 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8638,107 +5688,13 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8765,7 +5721,8 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8790,7 +5747,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8813,33 +5770,13 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } @@ -8875,7 +5812,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } @@ -8908,9 +5845,9 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8937,11 +5874,11 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } @@ -8960,51 +5897,11 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field RollbackTo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) } - var msglen int + m.TemplateGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9014,31 +5911,14 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.TemplateGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } var v int32 for shift := uint(0); ; shift += 7 { @@ -9055,7 +5935,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { break } } - m.ProgressDeadlineSeconds = &v + m.RevisionHistoryLimit = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9077,7 +5957,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9100,17 +5980,17 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) } - m.ObservedGeneration = 0 + m.CurrentNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9120,16 +6000,16 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.CurrentNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) } - m.Replicas = 0 + m.NumberMisscheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9139,16 +6019,16 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.NumberMisscheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) } - m.UpdatedReplicas = 0 + m.DesiredNumberScheduled = 0 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9158,16 +6038,16 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.DesiredNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) } - m.AvailableReplicas = 0 + m.NumberReady = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9177,16 +6057,16 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.NumberReady |= int32(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - m.UnavailableReplicas = 0 + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9196,16 +6076,16 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) } - var msglen int + m.UpdatedNumberScheduled = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9215,31 +6095,16 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) } - m.ReadyReplicas = 0 + m.NumberAvailable = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9249,12 +6114,31 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.NumberAvailable |= int32(b&0x7F) << shift if b < 0x80 { break } } case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) } @@ -9274,6 +6158,40 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error 
{ } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9295,7 +6213,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9318,10 +6236,10 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -9354,7 +6272,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -9386,7 +6304,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} + m.RollingUpdate = &RollingUpdateDaemonSet{} } if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -9413,7 +6331,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *Deployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9436,17 +6354,17 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9456,27 +6374,61 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9503,8 +6455,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9529,7 +6480,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9552,15 +6503,15 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9588,13 +6539,13 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9604,28 +6555,27 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << 
shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9653,62 +6603,76 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) } - if iNdEx >= l { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift 
:= uint(0); ; shift += 7 { @@ -9735,8 +6699,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9761,7 +6724,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostPortRange) Unmarshal(dAtA []byte) error { +func (m *DeploymentList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9784,17 +6747,17 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - m.Min = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9804,16 +6767,30 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - m.Max = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9823,11 +6800,26 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9849,7 +6841,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } return nil } -func (m *IDRange) Unmarshal(dAtA []byte) error { +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9872,36 +6864,17 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") } if fieldNum <= 0 { - return 
fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - m.Max = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9911,66 +6884,29 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9980,29 +6916,124 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.CIDR = string(dAtA[iNdEx:postIndex]) + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10012,23 +7043,24 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10051,7 +7083,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10074,15 +7106,35 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentSpec: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10109,13 +7161,16 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10142,13 +7197,13 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10175,65 +7230,34 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressBackend) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10243,27 +7267,35 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.Paused = bool(v != 0) + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10290,15 +7322,18 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10308,28 +7343,12 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ProgressDeadlineSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10351,7 +7370,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10374,17 +7393,17 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: 
illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var msglen int + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10394,30 +7413,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var msglen int + m.Replicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10427,81 +7432,73 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + 
m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10511,29 +7508,50 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - var msglen int + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10543,25 +7561,12 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.CollisionCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10583,7 +7588,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10606,15 +7611,47 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") } if fieldNum <= 0 { 
- return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10641,10 +7678,10 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10669,7 +7706,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10692,17 +7729,17 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10712,31 +7749,27 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var 
msglen int for shift := uint(0); ; shift += 7 { @@ -10763,48 +7796,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10832,8 +7830,8 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s iNdEx = postIndex default: iNdEx = preIndex @@ -10856,7 +7854,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10879,15 +7877,15 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10914,7 +7912,8 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10939,7 +7938,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *IPBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10962,15 +7961,15 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return 
fmt.Errorf("proto: IPBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10998,11 +7997,11 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + m.CIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11030,7 +8029,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretName = string(dAtA[iNdEx:postIndex]) + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -11053,7 +8052,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { +func (m *Ingress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11076,10 +8075,10 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11115,92 +8114,9 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11227,14 +8143,13 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11261,8 +8176,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.To = append(m.To, NetworkPolicyPeer{}) - if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11287,7 +8201,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { +func (m *IngressBackend) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11310,15 +8224,47 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11345,14 +8291,13 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := 
m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11379,8 +8324,10 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11405,7 +8352,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11428,10 +8375,10 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -11496,7 +8443,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, NetworkPolicy{}) + m.Items = append(m.Items, Ingress{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -11522,7 +8469,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11545,17 +8492,17 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11565,33 +8512,29 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11601,31 +8544,27 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11652,10 +8591,8 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} - } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11680,7 +8617,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11703,48 +8640,15 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s - iNdEx = 
postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11771,33 +8675,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Port == nil { - m.Port = &intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11819,7 +8701,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11842,17 +8724,17 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var msglen int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11862,64 +8744,16 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11929,29 +8763,27 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11979,7 +8811,8 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -12002,7 +8835,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12025,17 +8858,17 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12045,28 +8878,27 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12093,7 +8925,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12118,7 +8950,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12141,15 +8973,15 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12176,41 +9008,10 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12235,7 +9036,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12258,37 +9059,17 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12298,29 +9079,33 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12330,29 +9115,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12362,27 +9149,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Volumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12410,105 +9199,62 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.HostIPC = bool(v != 0) - case 10: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := 
uint(0); ; shift += 7 { @@ -12535,48 +9281,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 12: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12586,30 +9349,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 13: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12619,90 +9381,77 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return 
ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12729,14 +9478,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); 
; shift += 7 { @@ -12763,16 +9511,65 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12782,29 +9579,31 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 20: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12814,29 +9613,81 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + m.To = append(m.To, NetworkPolicyPeer{}) + if err := 
m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 21: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12846,27 +9697,29 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12893,16 +9746,64 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12929,14 +9830,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12963,10 +9863,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12991,7 +9889,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13014,15 +9912,15 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13049,13 +9947,16 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13082,13 +9983,16 @@ func (m *ReplicaSet) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13115,7 +10019,10 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13140,7 +10047,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13163,15 +10070,15 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13199,43 +10106,12 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13262,47 +10138,18 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.Port == nil { + m.Port = &intstr.IntOrString{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13312,24 +10159,12 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.EndPort = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13351,7 +10186,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13374,15 +10209,15 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13409,13 +10244,13 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13442,11 +10277,77 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = 
append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13468,7 +10369,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13491,17 +10392,17 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13511,15 +10412,28 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := 
uint(0); ; shift += 7 { @@ -13546,16 +10460,13 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13582,29 +10493,10 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13626,7 +10518,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13649,17 +10541,17 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Replicas = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13669,35 +10561,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.ObservedGeneration = 0 + m.Type = 
ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13707,35 +10593,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } - m.AvailableReplicas = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13745,16 +10625,30 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13764,81 +10658,29 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - m.Revision = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13848,11 +10690,24 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13874,7 +10729,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13897,15 +10752,15 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13932,16 +10787,13 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13968,10 +10820,8 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13996,7 +10846,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14019,15 +10869,35 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14054,16 +10924,16 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14090,13 +10960,29 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14118,7 +11004,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14141,17 +11027,74 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum 
:= int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) } - var stringLen uint64 + m.ReadyReplicas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14161,27 +11104,33 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14208,8 +11157,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14234,7 +11183,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14257,49 +11206,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var msglen int + m.Revision = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14309,26 +11226,11 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14350,7 +11252,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14373,17 +11275,17 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14393,29 +11295,33 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14425,24 +11331,27 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14465,7 +11374,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14488,17 +11397,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14508,27 +11417,31 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { 
break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14555,10 +11468,10 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15029,122 +11942,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 737a7e79c..9bbcaa0e2 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -28,38 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} +option go_package = "k8s.io/api/extensions/v1beta1"; // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. @@ -68,7 +37,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -94,7 +63,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -110,7 +79,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. 
repeated DaemonSet items = 2; @@ -123,14 +92,14 @@ message DaemonSetSpec { // If empty, defaulted to labels on Pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -207,6 +176,8 @@ message DaemonSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DaemonSetCondition conditions = 10; } @@ -234,7 +205,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -254,10 +225,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -270,7 +241,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -300,10 +271,10 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -374,6 +345,8 @@ message DeploymentStatus { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated DeploymentCondition conditions = 6; // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -398,19 +371,6 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. 
-// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { @@ -450,43 +410,24 @@ message HTTPIngressPath { // or '#'. message HTTPIngressRuleValue { // A collection of paths that map requests to backends. + // +listType=atomic repeated HTTPIngressPath paths = 1; } -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. message IPBlock { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the CIDR range // +optional + // +listType=atomic repeated string except = 2; } @@ -499,7 +440,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -520,13 +461,13 @@ message IngressBackend { // Specifies the port of the referenced service. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; // Resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. 
// +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressList is a collection of Ingress. @@ -534,12 +475,60 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Ingress. repeated Ingress items = 2; } +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +message IngressLoadBalancerIngress { + // IP is set for load-balancer ingress points that are IP based. + // +optional + optional string ip = 1; + + // Hostname is set for load-balancer ingress points that are DNS based. + // +optional + optional string hostname = 2; + + // Ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + repeated IngressPortStatus ports = 4; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message IngressLoadBalancerStatus { + // Ingress is a list containing ingress points for the load-balancer. + // +optional + // +listType=atomic + repeated IngressLoadBalancerIngress ingress = 1; +} + +// IngressPortStatus represents the error condition of a service port +message IngressPortStatus { + // Port is the port number of the ingress port. + optional int32 port = 1; + + // Protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. @@ -620,11 +609,13 @@ message IngressSpec { // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic repeated IngressTLS tls = 2; // A list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic repeated IngressRule rules = 3; } @@ -632,7 +623,7 @@ message IngressSpec { message IngressStatus { // LoadBalancer contains the current status of the load-balancer. // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; + optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. 
@@ -642,6 +633,7 @@ message IngressTLS { // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional + // +listType=atomic repeated string hosts = 1; // SecretName is the name of the secret used to terminate SSL traffic on 443. @@ -659,7 +651,7 @@ message NetworkPolicy { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior for this NetworkPolicy. // +optional @@ -677,6 +669,7 @@ message NetworkPolicyEgressRule { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; // List of destinations for outgoing traffic of pods selected for this rule. @@ -685,6 +678,7 @@ message NetworkPolicyEgressRule { // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic repeated NetworkPolicyPeer to = 2; } @@ -697,6 +691,7 @@ message NetworkPolicyIngressRule { // If this field is present and contains at least one item, then this rule allows traffic // only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; // List of sources which should be able to access the pods selected for this rule. @@ -705,6 +700,7 @@ message NetworkPolicyIngressRule { // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional + // +listType=atomic repeated NetworkPolicyPeer from = 2; } @@ -714,7 +710,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -729,7 +725,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. @@ -738,7 +734,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; // IPBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. @@ -758,14 +754,12 @@ message NetworkPolicyPort { // numbers. 
// If present, only traffic on the specified protocol AND port will be matched. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // If set, indicates that the range of ports from port to endPort, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. - // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional optional int32 endPort = 3; } @@ -777,7 +771,7 @@ message NetworkPolicySpec { // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // List of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod @@ -787,6 +781,7 @@ message NetworkPolicySpec { // If this field is empty then this NetworkPolicy does not allow any traffic // (and serves solely to ensure that the pods it selects are isolated by default). // +optional + // +listType=atomic repeated NetworkPolicyIngressRule ingress = 2; // List of egress rules to be applied to the selected pods. Outgoing traffic is @@ -797,6 +792,7 @@ message NetworkPolicySpec { // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. @@ -810,167 +806,10 @@ message NetworkPolicySpec { // an Egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated string policyTypes = 4; } -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. 
- // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. 
Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -979,7 +818,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -1005,7 +844,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -1021,7 +860,7 @@ message ReplicaSetList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -1048,18 +887,18 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller optional int32 replicas = 1; @@ -1083,6 +922,8 @@ message ReplicaSetStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated ReplicaSetCondition conditions = 6; } @@ -1110,7 +951,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -1132,7 +973,7 @@ message RollingUpdateDaemonSet { // cause evictions during disruption. // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -1148,7 +989,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -1162,65 +1003,14 @@ message RollingUpdateDeployment { // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. 
-message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -1243,7 +1033,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic map selector = 2; @@ -1258,16 +1048,3 @@ message ScaleStatus { optional string targetSelector = 3; } -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. 
-// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0bc..d58908edc 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -54,8 +54,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 963318a2f..09f58692f 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -35,7 +35,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional // +mapType=atomic Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` @@ -270,6 +270,8 @@ type DeploymentStatus struct { // Represents the latest available observations of a deployment's current state. // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` // Count of hash collisions for the Deployment. The Deployment controller uses this @@ -490,6 +492,8 @@ type DaemonSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` } @@ -652,11 +656,13 @@ type IngressSpec struct { // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` // A list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // TODO: Add the ability to specify load-balancer IP through claims } @@ -668,6 +674,7 @@ type IngressTLS struct { // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional + // +listType=atomic Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` // SecretName is the name of the secret used to terminate SSL traffic on 443. 
// Field is left optional to allow SSL routing based on SNI hostname alone. @@ -683,7 +690,55 @@ type IngressTLS struct { type IngressStatus struct { // LoadBalancer contains the current status of the load-balancer. // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type IngressLoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer. + // +optional + // +listType=atomic + Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +type IngressLoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based. + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // Hostname is set for load-balancer ingress points that are DNS based. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // Ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` +} + +// IngressPortStatus represents the error condition of a service port +type IngressPortStatus struct { + // Port is the port number of the ingress port. + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + + // Protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // Error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } // IngressRule represents the rules mapping the paths under a specified host to @@ -720,7 +775,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -750,6 +805,7 @@ type IngressRuleValue struct { // or '#'. type HTTPIngressRuleValue struct { // A collection of paths that map requests to backends. + // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. 
@@ -920,7 +976,7 @@ type ReplicaSetSpec struct { // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. + // Replicas is the most recently observed number of replicas. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` @@ -944,6 +1000,8 @@ type ReplicaSetStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` } @@ -974,389 +1032,6 @@ type ReplicaSetCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. 
- // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. 
- // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. 
- // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -// Deprecated: use FSType from policy API Group instead. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - CSI FSType = "csi" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -// Deprecated: use SELinuxStrategy from policy API Group instead. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. 
-type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -// Deprecated: use RunAsUserStrategy from policy API Group instead. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. 
-// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -// Deprecated: use FSGroupStrategyType from policy API Group instead. -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. 
- // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.2 -// +k8s:prerelease-lifecycle-gen:deprecated=1.11 -// +k8s:prerelease-lifecycle-gen:removed=1.16 -// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.3 @@ -1376,6 +1051,11 @@ type NetworkPolicy struct { // Specification of the desired behavior for this NetworkPolicy. // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is tombstoned to show why 3 is a reserved protobuf tag. + // This commented field should remain, so in the future if we decide to reimplement + // NetworkPolicyStatus a different protobuf name and tag SHOULD be used! + // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. @@ -1407,6 +1087,7 @@ type NetworkPolicySpec struct { // If this field is empty then this NetworkPolicy does not allow any traffic // (and serves solely to ensure that the pods it selects are isolated by default). // +optional + // +listType=atomic Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` // List of egress rules to be applied to the selected pods. Outgoing traffic is @@ -1417,6 +1098,7 @@ type NetworkPolicySpec struct { // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. @@ -1430,6 +1112,7 @@ type NetworkPolicySpec struct { // an Egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional + // +listType=atomic PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } @@ -1442,6 +1125,7 @@ type NetworkPolicyIngressRule struct { // If this field is present and contains at least one item, then this rule allows traffic // only if the traffic matches at least one port in the list. 
// +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` // List of sources which should be able to access the pods selected for this rule. @@ -1450,6 +1134,7 @@ type NetworkPolicyIngressRule struct { // If this field is present and contains at least one item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional + // +listType=atomic From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } @@ -1464,6 +1149,7 @@ type NetworkPolicyEgressRule struct { // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` // List of destinations for outgoing traffic of pods selected for this rule. @@ -1472,6 +1158,7 @@ type NetworkPolicyEgressRule struct { // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` } @@ -1493,24 +1180,23 @@ type NetworkPolicyPort struct { // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. - // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"` } // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. type IPBlock struct { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" // Except values will be rejected if they are outside the CIDR range // +optional + // +listType=atomic Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index d70303fa8..408022c9d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -24,37 +24,9 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
-var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -220,16 +192,6 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { return map_DeploymentStrategy } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", @@ -250,30 +212,10 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. 
Deprecated: use HostPortRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs. Deprecated: use IDRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_IPBlock = map[string]string{ - "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the CIDR range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -312,6 +254,37 @@ func (IngressList) SwaggerDoc() map[string]string { return map_IngressList } +var map_IngressLoadBalancerIngress = map[string]string{ + "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "ip": "IP is set for load-balancer ingress points that are IP based.", + "hostname": "Hostname is set for load-balancer ingress points that are DNS based.", + "ports": "Ports provides information about the ports exposed by this LoadBalancer.", +} + +func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerIngress +} + +var map_IngressLoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "Ingress is a list containing ingress points for the load-balancer.", +} + +func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerStatus +} + +var map_IngressPortStatus = map[string]string{ + "": "IngressPortStatus represents the error condition of a service port", + "port": "Port is the port number of the ingress port.", + "protocol": "Protocol is the protocol of the ingress port. 
The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (IngressPortStatus) SwaggerDoc() map[string]string { + return map_IngressPortStatus +} + var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", @@ -416,7 +389,7 @@ var map_NetworkPolicyPort = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", "protocol": "Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".", + "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. 
The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -435,58 +408,6 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { return map_NetworkPolicySpec } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. 
This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -535,7 +456,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "The number of ready replicas for this replica set.", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", @@ -576,46 +497,6 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
Deprecated: use SELinuxStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", @@ -639,7 +520,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "selector": "selector is a label query over pods that should match the replicas count. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", } @@ -647,14 +528,4 @@ func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index e0961588e..6b474ae48 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,54 +28,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { *out = *in @@ -435,27 +387,6 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in @@ -501,38 +432,6 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -637,6 +536,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]IngressPortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress. +func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress { + if in == nil { + return nil + } + out := new(IngressLoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressLoadBalancerIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus. +func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus { + if in == nil { + return nil + } + out := new(IngressLoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus. +func (in *IngressPortStatus) DeepCopy() *IngressPortStatus { + if in == nil { + return nil + } + out := new(IngressPortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressRule) DeepCopyInto(out *IngressRule) { *out = *in @@ -971,161 +937,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, 
&out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { *out = *in @@ -1322,95 +1133,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. 
-func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Scale) DeepCopyInto(out *Scale) { *out = *in @@ -1477,24 +1199,3 @@ func (in *ScaleStatus) DeepCopy() *ScaleStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. -func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go index 963aaffba..5c9354228 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go @@ -235,54 +235,6 @@ func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) { return 1, 16 } -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
-func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 2 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 11 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicyList"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 16 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/api/flowcontrol/v1/doc.go similarity index 77% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go rename to vendor/k8s.io/api/flowcontrol/v1/doc.go index a3d4d0c60..c9e7db158 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,5 +21,5 @@ limitations under the License. // +groupName=flowcontrol.apiserver.k8s.io -// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". -package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1" +// Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io". +package v1 // import "k8s.io/api/flowcontrol/v1" diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go similarity index 86% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/flowcontrol/v1/generated.pb.go index 7f0687ac0..b342445f7 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +// source: k8s.io/api/flowcontrol/v1/generated.proto -package v1alpha1 +package v1 import ( fmt "fmt" @@ -43,10 +43,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_5d08a1401821035d, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{0} + return fileDescriptor_5d08a1401821035d, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{1} + return fileDescriptor_5d08a1401821035d, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{2} + return fileDescriptor_5d08a1401821035d, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{3} + return fileDescriptor_5d08a1401821035d, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{4} + return fileDescriptor_5d08a1401821035d, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{5} + return fileDescriptor_5d08a1401821035d, []int{6} } func 
(m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{6} + return fileDescriptor_5d08a1401821035d, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{7} + return fileDescriptor_5d08a1401821035d, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{8} + return fileDescriptor_5d08a1401821035d, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{9} + return fileDescriptor_5d08a1401821035d, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{10} + return fileDescriptor_5d08a1401821035d, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{11} + return fileDescriptor_5d08a1401821035d, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{12} + return fileDescriptor_5d08a1401821035d, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = 
PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{13} + return fileDescriptor_5d08a1401821035d, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{14} + return fileDescriptor_5d08a1401821035d, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{15} + return fileDescriptor_5d08a1401821035d, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{16} + return fileDescriptor_5d08a1401821035d, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{17} + return fileDescriptor_5d08a1401821035d, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{18} + return fileDescriptor_5d08a1401821035d, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{19} + return fileDescriptor_5d08a1401821035d, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return 
fileDescriptor_45ba024d525b289b, []int{20} + return fileDescriptor_5d08a1401821035d, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_45ba024d525b289b, []int{21} + return fileDescriptor_5d08a1401821035d, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,130 +688,169 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { - proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod") - proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema") - proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition") - proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaList") - proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec") - proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus") - proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.GroupSubject") - proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse") - proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration") - proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule") - proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects") - proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration") - proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition") - proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationList") - proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference") - proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec") - proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus") - proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.QueuingConfiguration") - proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.ResourcePolicyRule") - proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.ServiceAccountSubject") - proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1alpha1.Subject") - proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.UserSubject") + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.ExemptPriorityLevelConfiguration") + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaCondition") + 
proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1.UserSubject") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto", fileDescriptor_45ba024d525b289b) -} - -var fileDescriptor_45ba024d525b289b = []byte{ - // 1502 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4d, 0x6f, 0xdb, 0x46, - 0x13, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x59, 0x27, 0xb0, 0xe0, 0x00, 0x92, 0xc3, 0x17, 0x78, - 0x93, 0xf7, 0x4d, 0x42, 0xc6, 0x69, 0x92, 0xa6, 0x08, 0x8a, 0xc0, 0x74, 0xda, 0x7c, 0xd9, 0xae, - 0xbd, 0x4e, 0x52, 0x34, 0x48, 0x81, 0xd0, 0xd4, 0x5a, 0xda, 0x58, 0x22, 0x59, 0x2e, 0xa9, 0xd4, - 0x45, 0x0e, 0x05, 0xfa, 0x07, 0xfa, 0x03, 0x72, 0xec, 0xa1, 0xe7, 0xfe, 0x82, 0x1e, 0x8d, 0xa2, - 0x87, 0x1c, 0x73, 0x12, 0x62, 0xf5, 0x5a, 0xf4, 0xdc, 0xe6, 0x54, 0xec, 0x72, 0x49, 0x8a, 0xfa, - 0xb0, 0x94, 0x1a, 0xc8, 0xa9, 0x37, 0x71, 0x3e, 0x9e, 0xd9, 0x99, 0x9d, 0x99, 0x7d, 0x04, 0x77, - 0xf6, 0xae, 0x33, 0x8d, 0x3a, 0xfa, 0x5e, 0xb0, 0x43, 0x3c, 0x9b, 0xf8, 0x84, 0xe9, 0x0d, 0x62, - 0x97, 0x1d, 0x4f, 0x97, 0x0a, 0xd3, 0xa5, 0xfa, 0x6e, 0xcd, 0x79, 0x6e, 0x39, 0xb6, 0xef, 0x39, - 0x35, 0xbd, 0xb1, 0x6c, 0xd6, 0xdc, 0xaa, 0xb9, 0xac, 0x57, 0x88, 0x4d, 0x3c, 0xd3, 0x27, 0x65, - 0xcd, 0xf5, 0x1c, 0xdf, 0x41, 0xa5, 0xd0, 0x41, 0x33, 0x5d, 0xaa, 0xb5, 0x39, 0x68, 0x91, 0xc3, - 0xe2, 0xc5, 0x0a, 0xf5, 0xab, 0xc1, 0x8e, 0x66, 0x39, 0x75, 0xbd, 0xe2, 0x54, 0x1c, 0x5d, 0xf8, - 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x21, 0xde, 0xe2, 0x95, 0xe4, 0x00, 0x75, 0xd3, - 0xaa, 0x52, 0x9b, 0x78, 0xfb, 0xba, 0xbb, 0x57, 
0xe1, 0x02, 0xa6, 0xd7, 0x89, 0x6f, 0xea, 0x8d, - 0xae, 0x53, 0x2c, 0xea, 0xfd, 0xbc, 0xbc, 0xc0, 0xf6, 0x69, 0x9d, 0x74, 0x39, 0x5c, 0x1b, 0xe4, - 0xc0, 0xac, 0x2a, 0xa9, 0x9b, 0x9d, 0x7e, 0xea, 0x63, 0x58, 0xf8, 0xb4, 0xe6, 0x3c, 0xbf, 0x45, - 0x99, 0x4f, 0xed, 0x4a, 0x40, 0x59, 0x95, 0x78, 0xeb, 0xc4, 0xaf, 0x3a, 0x65, 0x74, 0x13, 0xb2, - 0xfe, 0xbe, 0x4b, 0x0a, 0xca, 0x92, 0x72, 0x2e, 0x6f, 0x9c, 0x3f, 0x68, 0x96, 0x46, 0x5a, 0xcd, - 0x52, 0xf6, 0xc1, 0xbe, 0x4b, 0xde, 0x36, 0x4b, 0xa7, 0xfb, 0xb8, 0x71, 0x35, 0x16, 0x8e, 0xea, - 0xcb, 0x0c, 0x00, 0xb7, 0xda, 0x16, 0xa1, 0xd1, 0x53, 0x98, 0xe0, 0xe9, 0x96, 0x4d, 0xdf, 0x14, - 0x98, 0x93, 0x97, 0x2f, 0x69, 0x49, 0xb1, 0xe3, 0x53, 0x6b, 0xee, 0x5e, 0x85, 0x0b, 0x98, 0xc6, - 0xad, 0xb5, 0xc6, 0xb2, 0xf6, 0xd9, 0xce, 0x33, 0x62, 0xf9, 0xeb, 0xc4, 0x37, 0x0d, 0x24, 0x4f, - 0x01, 0x89, 0x0c, 0xc7, 0xa8, 0x68, 0x0b, 0xb2, 0xcc, 0x25, 0x56, 0x21, 0x23, 0xd0, 0x75, 0x6d, - 0xc0, 0x55, 0x6a, 0xc9, 0xe1, 0xb6, 0x5d, 0x62, 0x19, 0x53, 0x51, 0x8a, 0xfc, 0x0b, 0x0b, 0x28, - 0xf4, 0x05, 0x8c, 0x31, 0xdf, 0xf4, 0x03, 0x56, 0x18, 0x15, 0xa0, 0xcb, 0xef, 0x02, 0x2a, 0x1c, - 0x8d, 0x19, 0x09, 0x3b, 0x16, 0x7e, 0x63, 0x09, 0xa8, 0xbe, 0xce, 0xc0, 0x7c, 0x62, 0xbc, 0xea, - 0xd8, 0x65, 0xea, 0x53, 0xc7, 0x46, 0x37, 0x52, 0x75, 0x3f, 0xdb, 0x51, 0xf7, 0x85, 0x1e, 0x2e, - 0x49, 0xcd, 0xd1, 0x47, 0xf1, 0x79, 0x33, 0xc2, 0xfd, 0x4c, 0x3a, 0xf8, 0xdb, 0x66, 0x69, 0x36, - 0x76, 0x4b, 0x9f, 0x07, 0x35, 0x00, 0xd5, 0x4c, 0xe6, 0x3f, 0xf0, 0x4c, 0x9b, 0x85, 0xb0, 0xb4, - 0x4e, 0x64, 0xda, 0xff, 0x1f, 0xee, 0xa6, 0xb8, 0x87, 0xb1, 0x28, 0x43, 0xa2, 0xb5, 0x2e, 0x34, - 0xdc, 0x23, 0x02, 0xfa, 0x2f, 0x8c, 0x79, 0xc4, 0x64, 0x8e, 0x5d, 0xc8, 0x8a, 0x23, 0xc7, 0xf5, - 0xc2, 0x42, 0x8a, 0xa5, 0x16, 0xfd, 0x0f, 0xc6, 0xeb, 0x84, 0x31, 0xb3, 0x42, 0x0a, 0x39, 0x61, - 0x38, 0x2b, 0x0d, 0xc7, 0xd7, 0x43, 0x31, 0x8e, 0xf4, 0xea, 0xcf, 0x0a, 0xcc, 0x24, 0x75, 0x5a, - 0xa3, 0xcc, 0x47, 0x4f, 0xba, 0xba, 0x4f, 0x1b, 0x2e, 0x27, 0xee, 0x2d, 0x7a, 0x6f, 0x4e, 0x86, - 0x9b, 0x88, 0x24, 0x6d, 0x9d, 0xb7, 0x09, 0x39, 0xea, 0x93, 0x3a, 0xaf, 0xfa, 0xe8, 0xb9, 0xc9, - 0xcb, 0xe7, 0xdf, 0xa1, 0x4b, 0x8c, 0x69, 0x89, 0x9b, 0xbb, 0xcb, 0x11, 0x70, 0x08, 0xa4, 0xfe, - 0x3e, 0xda, 0x9e, 0x02, 0xef, 0x48, 0xf4, 0xa3, 0x02, 0x8b, 0xae, 0x47, 0x1d, 0x8f, 0xfa, 0xfb, - 0x6b, 0xa4, 0x41, 0x6a, 0xab, 0x8e, 0xbd, 0x4b, 0x2b, 0x81, 0x67, 0xf2, 0x5a, 0xca, 0xac, 0x6e, - 0x0d, 0x0c, 0xbd, 0xd9, 0x17, 0x02, 0x93, 0x5d, 0xe2, 0x11, 0xdb, 0x22, 0x86, 0x2a, 0xcf, 0xb4, - 0x78, 0x84, 0xf1, 0x11, 0x67, 0x41, 0xf7, 0x00, 0xd5, 0x4d, 0x9f, 0xd7, 0xb4, 0xb2, 0xe9, 0x11, - 0x8b, 0x94, 0x39, 0xaa, 0x68, 0xc9, 0x5c, 0xd2, 0x1f, 0xeb, 0x5d, 0x16, 0xb8, 0x87, 0x17, 0xfa, - 0x4e, 0x81, 0xf9, 0x72, 0xf7, 0xa2, 0x91, 0x9d, 0x79, 0x7d, 0xa8, 0x52, 0xf7, 0x58, 0x54, 0xc6, - 0x42, 0xab, 0x59, 0x9a, 0xef, 0xa1, 0xc0, 0xbd, 0xa2, 0xa1, 0x2f, 0x21, 0xe7, 0x05, 0x35, 0xc2, - 0x0a, 0x59, 0x71, 0xc3, 0x83, 0xc3, 0x6e, 0x3a, 0x35, 0x6a, 0xed, 0x63, 0xee, 0xf3, 0x39, 0xf5, - 0xab, 0xdb, 0x81, 0xd8, 0x58, 0x2c, 0xb9, 0x6e, 0xa1, 0xc2, 0x21, 0xaa, 0xfa, 0x02, 0xe6, 0x3a, - 0x17, 0x07, 0xaa, 0x02, 0x58, 0xd1, 0xac, 0xb2, 0x82, 0x22, 0xe2, 0x5e, 0x79, 0x87, 0xce, 0x8a, - 0x07, 0x3d, 0x59, 0x9b, 0xb1, 0x88, 0xe1, 0x36, 0x6c, 0xf5, 0x12, 0x4c, 0xdd, 0xf6, 0x9c, 0xc0, - 0x95, 0x87, 0x44, 0x4b, 0x90, 0xb5, 0xcd, 0x7a, 0xb4, 0x82, 0xe2, 0xbd, 0xb8, 0x61, 0xd6, 0x09, - 0x16, 0x1a, 0xf5, 0x07, 0x05, 0xa6, 0xd7, 0x68, 0x9d, 0xfa, 0x98, 0x30, 0xd7, 0xb1, 0x19, 0x41, - 0x57, 0x53, 0x6b, 0xeb, 0x4c, 0xc7, 0xda, 0x3a, 0x91, 0x32, 0x6e, 0x5b, 
0x58, 0x4f, 0x60, 0xfc, - 0xab, 0x80, 0x04, 0xd4, 0xae, 0xc8, 0xb5, 0x7d, 0x75, 0x60, 0x86, 0x5b, 0xa1, 0x7d, 0xaa, 0xe3, - 0x8c, 0x49, 0xbe, 0x08, 0xa4, 0x06, 0x47, 0x90, 0xea, 0x1f, 0x0a, 0x9c, 0x11, 0x91, 0x49, 0xb9, - 0x7f, 0x27, 0xa3, 0x27, 0x50, 0x30, 0x19, 0x0b, 0x3c, 0x52, 0x5e, 0x75, 0x6c, 0x2b, 0xf0, 0xf8, - 0x0c, 0xec, 0x6f, 0x57, 0x4d, 0x8f, 0x30, 0x91, 0x4e, 0xce, 0x58, 0x92, 0xe9, 0x14, 0x56, 0xfa, - 0xd8, 0xe1, 0xbe, 0x08, 0x68, 0x0f, 0xa6, 0x6b, 0xed, 0xc9, 0xcb, 0x3c, 0xb5, 0x81, 0x79, 0xa6, - 0x4a, 0x66, 0x9c, 0x92, 0x47, 0x48, 0x97, 0x1d, 0xa7, 0xb1, 0xd5, 0xe7, 0x70, 0x6a, 0x83, 0x0f, - 0x32, 0x73, 0x02, 0xcf, 0x22, 0x49, 0x0f, 0xa2, 0x12, 0xe4, 0x1a, 0xc4, 0xdb, 0x09, 0xfb, 0x28, - 0x6f, 0xe4, 0x79, 0x07, 0x3e, 0xe2, 0x02, 0x1c, 0xca, 0xd1, 0xc7, 0x30, 0x6b, 0x27, 0x9e, 0x0f, - 0xf1, 0x1a, 0x2b, 0x8c, 0x09, 0xd3, 0xf9, 0x56, 0xb3, 0x34, 0xbb, 0x91, 0x56, 0xe1, 0x4e, 0x5b, - 0xf5, 0x30, 0x03, 0x0b, 0x7d, 0x5a, 0x1e, 0x3d, 0x82, 0x09, 0x26, 0x7f, 0xcb, 0x36, 0x3e, 0x37, - 0x30, 0x79, 0xe9, 0x9c, 0x6c, 0xdd, 0x08, 0x0d, 0xc7, 0x58, 0xc8, 0x85, 0x69, 0x4f, 0x9e, 0x41, - 0x04, 0x95, 0xdb, 0xf7, 0x83, 0x81, 0xe0, 0xdd, 0xf5, 0x49, 0xca, 0x8b, 0xdb, 0x11, 0x71, 0x3a, - 0x00, 0x7a, 0x01, 0x73, 0x6d, 0x89, 0x87, 0x41, 0x47, 0x45, 0xd0, 0x6b, 0x03, 0x83, 0xf6, 0xbc, - 0x17, 0xa3, 0x20, 0xe3, 0xce, 0x6d, 0x74, 0xe0, 0xe2, 0xae, 0x48, 0xea, 0xaf, 0x19, 0x38, 0x62, - 0x21, 0xbf, 0x07, 0x82, 0x65, 0xa6, 0x08, 0xd6, 0xcd, 0x63, 0x3c, 0x35, 0x7d, 0x09, 0x17, 0xed, - 0x20, 0x5c, 0x2b, 0xc7, 0x09, 0x72, 0x34, 0x01, 0xfb, 0x33, 0x03, 0xff, 0xe9, 0xef, 0x9c, 0x10, - 0xb2, 0xfb, 0xa9, 0xcd, 0xf6, 0x61, 0xc7, 0x66, 0x3b, 0x3b, 0x04, 0xc4, 0xbf, 0x04, 0xad, 0x83, - 0xa0, 0xbd, 0x51, 0xa0, 0xd8, 0xbf, 0x6e, 0xef, 0x81, 0xb0, 0x3d, 0x4d, 0x13, 0xb6, 0x1b, 0xc7, - 0xe8, 0xb2, 0x3e, 0x04, 0xee, 0xf6, 0x51, 0xcd, 0x15, 0x33, 0xad, 0x21, 0x9e, 0xda, 0x83, 0x23, - 0x6b, 0x25, 0x98, 0xe1, 0x80, 0xbf, 0x0c, 0x29, 0xef, 0x4f, 0x6c, 0x73, 0xa7, 0x46, 0xea, 0xc4, - 0xf6, 0x65, 0x47, 0x52, 0x18, 0xaf, 0x85, 0x4f, 0xa4, 0x9c, 0x6b, 0x63, 0xb8, 0x97, 0xe9, 0xa8, - 0x27, 0x35, 0x7c, 0x8e, 0xa5, 0x19, 0x8e, 0xf0, 0xd5, 0x97, 0x0a, 0x2c, 0x0d, 0x1a, 0x57, 0xf4, - 0x75, 0x0f, 0xda, 0x73, 0x1c, 0x56, 0x3b, 0x3c, 0x0d, 0xfa, 0x49, 0x81, 0x93, 0xbd, 0xc8, 0x05, - 0x9f, 0x00, 0xce, 0x28, 0x62, 0x3a, 0x10, 0x4f, 0xc0, 0x96, 0x90, 0x62, 0xa9, 0x45, 0x17, 0x60, - 0xa2, 0x6a, 0xda, 0xe5, 0x6d, 0xfa, 0x4d, 0x44, 0x76, 0xe3, 0x1e, 0xbc, 0x23, 0xe5, 0x38, 0xb6, - 0x40, 0xb7, 0x60, 0x4e, 0xf8, 0xad, 0x11, 0xbb, 0xe2, 0x57, 0x45, 0xb1, 0xc4, 0x34, 0xe7, 0x92, - 0x47, 0x61, 0xab, 0x43, 0x8f, 0xbb, 0x3c, 0xd4, 0xbf, 0x14, 0x40, 0xff, 0xe4, 0xbd, 0x3f, 0x0f, - 0x79, 0xd3, 0xa5, 0x82, 0xf6, 0x85, 0x53, 0x90, 0x37, 0xa6, 0x5b, 0xcd, 0x52, 0x7e, 0x65, 0xf3, - 0x6e, 0x28, 0xc4, 0x89, 0x9e, 0x1b, 0x47, 0x0f, 0x61, 0xf8, 0xe0, 0x49, 0xe3, 0x28, 0x30, 0xc3, - 0x89, 0x1e, 0x5d, 0x87, 0x29, 0xab, 0x16, 0x30, 0x9f, 0x78, 0xdb, 0x96, 0xe3, 0x12, 0xb1, 0x35, - 0x26, 0x8c, 0x93, 0x32, 0xa7, 0xa9, 0xd5, 0x36, 0x1d, 0x4e, 0x59, 0x22, 0x0d, 0x80, 0xb7, 0x3c, - 0x73, 0x4d, 0x1e, 0x27, 0x27, 0xe2, 0xcc, 0xf0, 0x0b, 0xdb, 0x88, 0xa5, 0xb8, 0xcd, 0x42, 0x7d, - 0x06, 0xa7, 0xb6, 0x89, 0xd7, 0xa0, 0x16, 0x59, 0xb1, 0x2c, 0x27, 0xb0, 0xfd, 0x88, 0xc0, 0xea, - 0x90, 0x8f, 0xcd, 0xe4, 0x54, 0x9c, 0x90, 0xf1, 0xf3, 0x31, 0x16, 0x4e, 0x6c, 0xe2, 0x31, 0xcc, - 0xf4, 0x1d, 0xc3, 0x5f, 0x32, 0x30, 0x9e, 0xc0, 0x67, 0xf7, 0xa8, 0x5d, 0x96, 0xc8, 0xa7, 0x23, - 0xeb, 0xfb, 0xd4, 0x2e, 0xbf, 0x6d, 0x96, 0x26, 0xa5, 0x19, 0xff, 0xc4, 0xc2, 0x10, 0xdd, 0x83, - 
0x6c, 0xc0, 0x88, 0x27, 0x07, 0xec, 0xc2, 0xc0, 0x6e, 0x7e, 0xc8, 0x88, 0x17, 0x31, 0xa0, 0x09, - 0x0e, 0xcd, 0x05, 0x58, 0x60, 0xa0, 0x0d, 0xc8, 0x55, 0xf8, 0xad, 0xc8, 0xcd, 0x7f, 0x71, 0x20, - 0x58, 0x3b, 0xb5, 0x0f, 0x1b, 0x41, 0x48, 0x70, 0x08, 0x83, 0x3c, 0x98, 0x61, 0xa9, 0x22, 0x8a, - 0x0b, 0x1b, 0x86, 0xd1, 0xf4, 0xac, 0xbd, 0x81, 0x5a, 0xcd, 0xd2, 0x4c, 0x5a, 0x85, 0x3b, 0x22, - 0xa8, 0x3a, 0x4c, 0xb6, 0xa5, 0x38, 0x78, 0x09, 0x1a, 0xda, 0xc1, 0x61, 0x71, 0xe4, 0xd5, 0x61, - 0x71, 0xe4, 0xf5, 0x61, 0x71, 0xe4, 0xdb, 0x56, 0x51, 0x39, 0x68, 0x15, 0x95, 0x57, 0xad, 0xa2, - 0xf2, 0xba, 0x55, 0x54, 0xde, 0xb4, 0x8a, 0xca, 0xf7, 0xbf, 0x15, 0x47, 0x1e, 0x4f, 0x44, 0x47, - 0xfb, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xb3, 0x17, 0x48, 0x11, 0x14, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_5d08a1401821035d) +} + +var fileDescriptor_5d08a1401821035d = []byte{ + // 1575 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x6f, 0xdb, 0x56, + 0x16, 0x36, 0x65, 0xc9, 0xb6, 0x8e, 0x9f, 0xb9, 0x8e, 0x61, 0xc5, 0x19, 0x48, 0x0e, 0x07, 0x93, + 0xc7, 0x64, 0x42, 0x25, 0xc6, 0x64, 0x26, 0x41, 0x66, 0x26, 0x08, 0x93, 0x4c, 0x5e, 0xb6, 0xe3, + 0x5c, 0xe5, 0x51, 0xa4, 0x05, 0x5a, 0x9a, 0xba, 0x96, 0x18, 0x8b, 0x8f, 0xf2, 0x92, 0x72, 0x5d, + 0xa0, 0x40, 0x7f, 0x42, 0x56, 0x5d, 0x76, 0xd1, 0xfe, 0x83, 0xae, 0x8a, 0x76, 0xd3, 0x65, 0x76, + 0xcd, 0x32, 0xed, 0x42, 0x68, 0xd4, 0xbf, 0xd0, 0x45, 0x9b, 0x55, 0x71, 0x2f, 0x2f, 0x49, 0x51, + 0x12, 0x69, 0xc1, 0x8b, 0x74, 0xd3, 0x9d, 0x79, 0xce, 0x77, 0xbe, 0x73, 0xef, 0xb9, 0xe7, 0x25, + 0xc3, 0x99, 0xdd, 0x4b, 0x54, 0x31, 0xec, 0xaa, 0xe6, 0x18, 0xd5, 0x9d, 0x96, 0xbd, 0xa7, 0xdb, + 0x96, 0xe7, 0xda, 0xad, 0x6a, 0xfb, 0x42, 0xb5, 0x41, 0x2c, 0xe2, 0x6a, 0x1e, 0xa9, 0x2b, 0x8e, + 0x6b, 0x7b, 0x36, 0x3a, 0x16, 0x40, 0x15, 0xcd, 0x31, 0x94, 0x1e, 0xa8, 0xd2, 0xbe, 0xb0, 0x72, + 0xae, 0x61, 0x78, 0x4d, 0x7f, 0x5b, 0xd1, 0x6d, 0xb3, 0xda, 0xb0, 0x1b, 0x76, 0x95, 0x5b, 0x6c, + 0xfb, 0x3b, 0xfc, 0x8b, 0x7f, 0xf0, 0xbf, 0x02, 0xa6, 0x95, 0x7f, 0xc6, 0x4e, 0x4d, 0x4d, 0x6f, + 0x1a, 0x16, 0x71, 0xf7, 0xab, 0xce, 0x6e, 0x83, 0x09, 0x68, 0xd5, 0x24, 0x9e, 0x36, 0xc4, 0xff, + 0x4a, 0x35, 0xcd, 0xca, 0xf5, 0x2d, 0xcf, 0x30, 0xc9, 0x80, 0xc1, 0xbf, 0x0e, 0x32, 0xa0, 0x7a, + 0x93, 0x98, 0x5a, 0xbf, 0x9d, 0xfc, 0xad, 0x04, 0xab, 0x37, 0x3f, 0x22, 0xa6, 0xe3, 0x6d, 0xb9, + 0x86, 0xed, 0x1a, 0xde, 0xfe, 0x3a, 0x69, 0x93, 0xd6, 0x75, 0xdb, 0xda, 0x31, 0x1a, 0xbe, 0xab, + 0x79, 0x86, 0x6d, 0xa1, 0x77, 0xa0, 0x64, 0xd9, 0xa6, 0x61, 0x69, 0x4c, 0xae, 0xfb, 0xae, 0x4b, + 0x2c, 0x7d, 0xbf, 0xd6, 0xd4, 0x5c, 0x42, 0x4b, 0xd2, 0xaa, 0x74, 0xba, 0xa0, 0xfe, 0xa5, 0xdb, + 0xa9, 0x94, 0x36, 0x53, 0x30, 0x38, 0xd5, 0x1a, 0xfd, 0x17, 0xe6, 0x5b, 0xc4, 0xaa, 0x6b, 0xdb, + 0x2d, 0xb2, 0x45, 0x5c, 0x9d, 0x58, 0x5e, 0x29, 0xc7, 0x09, 0x17, 0xbb, 0x9d, 0xca, 0xfc, 0x7a, + 0x52, 0x85, 0xfb, 0xb1, 0xf2, 0x53, 0x58, 0xfe, 0x7f, 0xcb, 0xde, 0xbb, 0x61, 0x50, 0xcf, 0xb0, + 0x1a, 0xbe, 0x41, 0x9b, 0xc4, 0xdd, 0x20, 0x5e, 0xd3, 0xae, 0xa3, 0xab, 0x90, 0xf7, 0xf6, 0x1d, + 0xc2, 0xcf, 0x57, 0x54, 0xcf, 0xbe, 0xe8, 0x54, 0xc6, 0xba, 0x9d, 0x4a, 0xfe, 0xe1, 0xbe, 0x43, + 0xde, 0x74, 0x2a, 0xc7, 0x53, 0xcc, 0x98, 0x1a, 0x73, 0x43, 0xf9, 0x79, 0x0e, 0x80, 0xa1, 0x6a, + 0x3c, 0x70, 0xe8, 0x03, 0x98, 0x62, 0x8f, 0x55, 0xd7, 0x3c, 0x8d, 0x73, 0x4e, 0xaf, 0x9d, 0x57, + 0xe2, 0x24, 0x89, 0x62, 0xae, 0x38, 0xbb, 0x0d, 0x26, 0xa0, 0x0a, 0x43, 0x2b, 0xed, 0x0b, 0xca, + 0xfd, 0xed, 0x67, 0x44, 0xf7, 
0x36, 0x88, 0xa7, 0xa9, 0x48, 0x9c, 0x02, 0x62, 0x19, 0x8e, 0x58, + 0xd1, 0x3d, 0xc8, 0x53, 0x87, 0xe8, 0x3c, 0x00, 0xd3, 0x6b, 0x67, 0x94, 0xd4, 0x14, 0x54, 0xe2, + 0x63, 0xd5, 0x1c, 0xa2, 0xab, 0x33, 0xe1, 0xe5, 0xd8, 0x17, 0xe6, 0x24, 0xa8, 0x06, 0x13, 0xd4, + 0xd3, 0x3c, 0x9f, 0x96, 0xc6, 0x39, 0xdd, 0xd9, 0xd1, 0xe8, 0xb8, 0x89, 0x3a, 0x27, 0x08, 0x27, + 0x82, 0x6f, 0x2c, 0xa8, 0xe4, 0x57, 0x39, 0x58, 0x8c, 0xc1, 0xd7, 0x6d, 0xab, 0x6e, 0xf0, 0xfc, + 0xb8, 0x92, 0x88, 0xf5, 0xa9, 0xbe, 0x58, 0x2f, 0x0f, 0x31, 0x89, 0xe3, 0x8c, 0x2e, 0x47, 0x27, + 0xcd, 0x71, 0xf3, 0x13, 0x49, 0xe7, 0x6f, 0x3a, 0x95, 0xf9, 0xc8, 0x2c, 0x79, 0x1e, 0xd4, 0x06, + 0xd4, 0xd2, 0xa8, 0xf7, 0xd0, 0xd5, 0x2c, 0x1a, 0xd0, 0x1a, 0x26, 0x11, 0x17, 0xfe, 0xfb, 0x68, + 0xaf, 0xc3, 0x2c, 0xd4, 0x15, 0xe1, 0x12, 0xad, 0x0f, 0xb0, 0xe1, 0x21, 0x1e, 0xd0, 0x49, 0x98, + 0x70, 0x89, 0x46, 0x6d, 0xab, 0x94, 0xe7, 0x47, 0x8e, 0xe2, 0x85, 0xb9, 0x14, 0x0b, 0x2d, 0x3a, + 0x03, 0x93, 0x26, 0xa1, 0x54, 0x6b, 0x90, 0x52, 0x81, 0x03, 0xe7, 0x05, 0x70, 0x72, 0x23, 0x10, + 0xe3, 0x50, 0x2f, 0x7f, 0x23, 0xc1, 0x5c, 0x1c, 0xa7, 0x75, 0x83, 0x7a, 0xe8, 0xbd, 0x81, 0x8c, + 0x53, 0x46, 0xbb, 0x13, 0xb3, 0xe6, 0xf9, 0xb6, 0x20, 0xdc, 0x4d, 0x85, 0x92, 0x9e, 0x6c, 0xbb, + 0x0b, 0x05, 0xc3, 0x23, 0x26, 0x8b, 0xfa, 0xf8, 0xe9, 0xe9, 0xb5, 0xbf, 0x8d, 0x94, 0x1f, 0xea, + 0xac, 0x60, 0x2c, 0xdc, 0x61, 0xb6, 0x38, 0xa0, 0x90, 0x7f, 0x18, 0xef, 0x3d, 0x3c, 0xcb, 0x42, + 0xf4, 0x85, 0x04, 0x2b, 0x4e, 0x6a, 0x47, 0x11, 0xf7, 0xf9, 0x5f, 0x86, 0xd3, 0xf4, 0x76, 0x84, + 0xc9, 0x0e, 0x61, 0x3d, 0x84, 0xa8, 0xb2, 0x38, 0xcd, 0x4a, 0x06, 0x38, 0xe3, 0x14, 0xe8, 0x2e, + 0x20, 0x53, 0xf3, 0x58, 0x1c, 0x1b, 0x5b, 0x2e, 0xd1, 0x49, 0x9d, 0xb1, 0x8a, 0x06, 0x14, 0xe5, + 0xc4, 0xc6, 0x00, 0x02, 0x0f, 0xb1, 0x42, 0x9f, 0xc0, 0x62, 0x7d, 0xb0, 0x9f, 0x88, 0x64, 0x5c, + 0x3b, 0x20, 0xba, 0x43, 0x3a, 0x91, 0xba, 0xdc, 0xed, 0x54, 0x16, 0x87, 0x28, 0xf0, 0x30, 0x3f, + 0xe8, 0x09, 0x14, 0x5c, 0xbf, 0x45, 0x68, 0x29, 0xcf, 0x9f, 0x33, 0xcb, 0xe1, 0x96, 0xdd, 0x32, + 0xf4, 0x7d, 0xcc, 0xd0, 0x4f, 0x0c, 0xaf, 0x59, 0xf3, 0x79, 0x33, 0xa2, 0xf1, 0xdb, 0x72, 0x15, + 0x0e, 0xf8, 0xe4, 0x36, 0x2c, 0xf4, 0xf7, 0x07, 0xb4, 0x0d, 0xa0, 0x87, 0x25, 0xc9, 0x26, 0xc0, + 0x78, 0x5f, 0x6e, 0xa6, 0x27, 0x50, 0x54, 0xc9, 0x71, 0x2f, 0x8c, 0x44, 0x14, 0xf7, 0xb0, 0xca, + 0xe7, 0x61, 0xe6, 0x96, 0x6b, 0xfb, 0x8e, 0x38, 0x1e, 0x5a, 0x85, 0xbc, 0xa5, 0x99, 0x61, 0x8f, + 0x89, 0x5a, 0xde, 0xa6, 0x66, 0x12, 0xcc, 0x35, 0xf2, 0xe7, 0x12, 0xcc, 0xae, 0x1b, 0xa6, 0xe1, + 0x61, 0x42, 0x1d, 0xdb, 0xa2, 0x04, 0x5d, 0x4c, 0xf4, 0xa5, 0x13, 0x7d, 0x7d, 0xe9, 0x48, 0x02, + 0xdc, 0xd3, 0x91, 0x1e, 0xc3, 0xe4, 0x87, 0x3e, 0xf1, 0x0d, 0xab, 0x21, 0x7a, 0x71, 0x35, 0xe3, + 0x6e, 0x0f, 0x02, 0x64, 0x22, 0xb1, 0xd4, 0x69, 0x56, 0xe3, 0x42, 0x83, 0x43, 0x32, 0xf9, 0x97, + 0x1c, 0x9c, 0xe0, 0x3e, 0x49, 0xfd, 0x0f, 0x19, 0xb6, 0x04, 0x66, 0x5b, 0xbd, 0x57, 0x16, 0xb7, + 0x3b, 0x9d, 0x71, 0xbb, 0x44, 0x88, 0xd4, 0x25, 0x11, 0xc1, 0x64, 0x98, 0x71, 0x92, 0x75, 0xd8, + 0x4c, 0x1f, 0x1f, 0x7d, 0xa6, 0xa3, 0xfb, 0xb0, 0xb4, 0x6d, 0xbb, 0xae, 0xbd, 0x67, 0x58, 0x0d, + 0xee, 0x27, 0x24, 0xc9, 0x73, 0x92, 0x63, 0xdd, 0x4e, 0x65, 0x49, 0x1d, 0x06, 0xc0, 0xc3, 0xed, + 0xe4, 0x3d, 0x58, 0xda, 0x64, 0x5d, 0x83, 0xda, 0xbe, 0xab, 0x93, 0x38, 0xfb, 0x51, 0x05, 0x0a, + 0x6d, 0xe2, 0x6e, 0x07, 0x19, 0x5c, 0x54, 0x8b, 0x2c, 0xf7, 0x1f, 0x33, 0x01, 0x0e, 0xe4, 0xec, + 0x26, 0x56, 0x6c, 0xf9, 0x08, 0xaf, 0xd3, 0xd2, 0x04, 0x87, 0xf2, 0x9b, 0x6c, 0x26, 0x55, 0xb8, + 0x1f, 0x2b, 0x7f, 0x9f, 0x83, 0xe5, 0x94, 0x62, 0x43, 
0x5b, 0x30, 0x45, 0xc5, 0xdf, 0xa2, 0x80, + 0xe4, 0x8c, 0x67, 0x10, 0x66, 0x71, 0x43, 0x0f, 0x79, 0x70, 0xc4, 0x82, 0x9e, 0xc1, 0xac, 0x2b, + 0xbc, 0x73, 0x77, 0xa2, 0xb1, 0x9f, 0xcb, 0xa0, 0x1d, 0x8c, 0x49, 0xfc, 0xc4, 0xb8, 0x97, 0x0b, + 0x27, 0xa9, 0x51, 0x1b, 0x16, 0x7a, 0x2e, 0x1b, 0xb8, 0x1b, 0xe7, 0xee, 0xce, 0x67, 0xb8, 0x1b, + 0xfa, 0x0a, 0x6a, 0x49, 0x78, 0x5c, 0xd8, 0xec, 0x63, 0xc4, 0x03, 0x3e, 0xe4, 0xef, 0x72, 0x90, + 0xd1, 0xeb, 0xdf, 0xc2, 0x8e, 0xf6, 0x6e, 0x62, 0x47, 0xbb, 0x7c, 0xa8, 0xf9, 0x95, 0xba, 0xb3, + 0xe9, 0x7d, 0x3b, 0xdb, 0x95, 0xc3, 0xd1, 0x67, 0xef, 0x70, 0xbf, 0xe6, 0xe0, 0xaf, 0xe9, 0xc6, + 0xf1, 0x4e, 0x77, 0x2f, 0xd1, 0x3b, 0xff, 0xdd, 0xd7, 0x3b, 0x4f, 0x8d, 0x40, 0xf1, 0xe7, 0x8e, + 0xd7, 0xb7, 0xe3, 0xfd, 0x28, 0x41, 0x39, 0x3d, 0x6e, 0x6f, 0x61, 0xe7, 0x7b, 0x9a, 0xdc, 0xf9, + 0x2e, 0x1e, 0x2a, 0xbf, 0x52, 0x76, 0xc0, 0x5b, 0x59, 0x69, 0x15, 0xad, 0x6c, 0x23, 0x8c, 0xf1, + 0x2f, 0x73, 0x59, 0x51, 0xe2, 0xcb, 0xe5, 0x01, 0xbf, 0x37, 0x12, 0xd6, 0x37, 0x2d, 0x36, 0x5c, + 0x4c, 0x36, 0x1f, 0x82, 0x5c, 0xd4, 0x61, 0xb2, 0x15, 0x0c, 0x61, 0x51, 0xc5, 0xff, 0x39, 0x68, + 0xfe, 0x65, 0x8d, 0xeb, 0x60, 0xd4, 0x0b, 0x18, 0x0e, 0x99, 0xd1, 0xfb, 0x30, 0x41, 0xf8, 0xaf, + 0xea, 0x11, 0x4a, 0xf9, 0xa0, 0x9f, 0xdf, 0x2a, 0xb0, 0xb4, 0x0b, 0x50, 0x58, 0xd0, 0xca, 0x9f, + 0x49, 0xb0, 0x7a, 0x50, 0x0f, 0x40, 0xee, 0x90, 0x3d, 0xed, 0x70, 0x3b, 0xf7, 0xe8, 0x7b, 0xdb, + 0x57, 0x12, 0x1c, 0x1d, 0xb6, 0x13, 0xb1, 0x82, 0x62, 0x8b, 0x50, 0xb4, 0xc5, 0x44, 0x05, 0xf5, + 0x80, 0x4b, 0xb1, 0xd0, 0xa2, 0x7f, 0xc0, 0x54, 0x53, 0xb3, 0xea, 0x35, 0xe3, 0xe3, 0x70, 0x15, + 0x8f, 0x52, 0xfa, 0xb6, 0x90, 0xe3, 0x08, 0x81, 0x6e, 0xc0, 0x02, 0xb7, 0x5b, 0x27, 0x56, 0xc3, + 0x6b, 0xf2, 0x77, 0x10, 0xdb, 0x46, 0x34, 0x57, 0x1e, 0xf4, 0xe9, 0xf1, 0x80, 0x85, 0xfc, 0x9b, + 0x04, 0xe8, 0x30, 0x0b, 0xc2, 0x59, 0x28, 0x6a, 0x8e, 0xc1, 0xf7, 0xd4, 0xa0, 0xa8, 0x8a, 0xea, + 0x6c, 0xb7, 0x53, 0x29, 0x5e, 0xdb, 0xba, 0x13, 0x08, 0x71, 0xac, 0x67, 0xe0, 0x70, 0x8a, 0x06, + 0xd3, 0x52, 0x80, 0x43, 0xc7, 0x14, 0xc7, 0x7a, 0x74, 0x09, 0x66, 0xf4, 0x96, 0x4f, 0x3d, 0xe2, + 0xd6, 0x74, 0xdb, 0x21, 0xbc, 0x09, 0x4d, 0xa9, 0x47, 0xc5, 0x9d, 0x66, 0xae, 0xf7, 0xe8, 0x70, + 0x02, 0x89, 0x14, 0x00, 0x56, 0x47, 0xd4, 0xd1, 0x98, 0x9f, 0x02, 0xf7, 0x33, 0xc7, 0x1e, 0x6c, + 0x33, 0x92, 0xe2, 0x1e, 0x84, 0xfc, 0x0c, 0x96, 0x6a, 0xc4, 0x6d, 0x1b, 0x3a, 0xb9, 0xa6, 0xeb, + 0xb6, 0x6f, 0x79, 0xe1, 0xc6, 0x5d, 0x85, 0x62, 0x04, 0x13, 0xa5, 0x76, 0x44, 0xf8, 0x2f, 0x46, + 0x5c, 0x38, 0xc6, 0x44, 0xb5, 0x9d, 0x4b, 0xad, 0xed, 0xaf, 0x73, 0x30, 0x19, 0xd3, 0xe7, 0x77, + 0x0d, 0xab, 0x2e, 0x98, 0x8f, 0x87, 0xe8, 0x7b, 0x86, 0x55, 0x7f, 0xd3, 0xa9, 0x4c, 0x0b, 0x18, + 0xfb, 0xc4, 0x1c, 0x88, 0x6e, 0x40, 0xde, 0xa7, 0xc4, 0x15, 0x55, 0x7b, 0x32, 0x23, 0x8f, 0x1f, + 0x51, 0xe2, 0x86, 0x2b, 0xd3, 0x14, 0x23, 0x65, 0x02, 0xcc, 0xad, 0xd1, 0x6d, 0x28, 0x34, 0xd8, + 0x7b, 0x88, 0xc2, 0x3c, 0x95, 0x41, 0xd3, 0xfb, 0xfb, 0x23, 0x78, 0x7c, 0x2e, 0xc1, 0x01, 0x01, + 0x6a, 0xc1, 0x1c, 0x4d, 0x04, 0x8e, 0x3f, 0x52, 0xf6, 0x0a, 0x34, 0x34, 0xd2, 0x2a, 0xea, 0x76, + 0x2a, 0x73, 0x49, 0x15, 0xee, 0xe3, 0x96, 0xab, 0x30, 0xdd, 0x73, 0xad, 0x83, 0xfb, 0xa8, 0x7a, + 0xf5, 0xc5, 0xeb, 0xf2, 0xd8, 0xcb, 0xd7, 0xe5, 0xb1, 0x57, 0xaf, 0xcb, 0x63, 0x9f, 0x76, 0xcb, + 0xd2, 0x8b, 0x6e, 0x59, 0x7a, 0xd9, 0x2d, 0x4b, 0xaf, 0xba, 0x65, 0xe9, 0xa7, 0x6e, 0x59, 0x7a, + 0xfe, 0x73, 0x79, 0xec, 0xe9, 0xb1, 0xd4, 0xff, 0x89, 0xfe, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x16, + 0x4e, 0x14, 0xcf, 0x2f, 0x15, 0x00, 0x00, +} + +func (m 
*ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { @@ -1154,6 +1221,16 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i _ = i var l int _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } { size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1164,9 +1241,11 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i } i-- dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) - i-- - dAtA[i] = 0x8 + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -1477,6 +1556,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Limited != nil { { size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) @@ -1769,6 +1860,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + func (m *FlowDistinguisherMethod) Size() (n int) { if m == nil { return 0 @@ -1900,9 +2006,17 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { } var l int _ = l - n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } return n } @@ -2028,6 +2142,10 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) { l = m.Limited.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2145,6 +2263,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) 
{ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} func (this *FlowDistinguisherMethod) String() string { if this == nil { return "nil" @@ -2256,8 +2385,10 @@ func (this *LimitedPriorityLevelConfiguration) String() string { return "nil" } s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, - `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, `}`, }, "") return s @@ -2359,6 +2490,7 @@ func (this *PriorityLevelConfigurationSpec) String() string { s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, `}`, }, "") return s @@ -2446,6 +2578,96 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3492,9 +3714,9 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) } - m.AssuredConcurrencyShares = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3504,11 +3726,12 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AssuredConcurrencyShares |= int32(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } + m.NominalConcurrencyShares = &v case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) @@ -3542,6 +3765,46 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4485,6 +4748,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/flowcontrol/v1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1/generated.proto new file mode 100644 index 000000000..33a135889 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1/generated.proto @@ -0,0 +1,520 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.flowcontrol.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/flowcontrol/v1"; + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +message FlowDistinguisherMethod { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + optional string type = 1; +} + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +message FlowSchema { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a FlowSchema. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaSpec spec = 2; + + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional FlowSchemaStatus status = 3; +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +message FlowSchemaCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// FlowSchemaList is a list of FlowSchema objects. +message FlowSchemaList { + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of FlowSchemas. + repeated FlowSchema items = 2; +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +message FlowSchemaSpec { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + optional PriorityLevelConfigurationReference priorityLevelConfiguration = 1; + + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + optional int32 matchingPrecedence = 2; + + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. + // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + optional FlowDistinguisherMethod distinguisherMethod = 3; + + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + repeated PolicyRulesWithSubjects rules = 4; +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +message FlowSchemaStatus { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated FlowSchemaCondition conditions = 1; +} + +// GroupSubject holds detailed information for group-kind subject. +message GroupSubject { + // name is the user group that matches, or "*" to match all user groups. 
+ // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + optional string name = 1; +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +message LimitResponse { + // `type` is "Queue" or "Reject". + // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + optional QueuingConfiguration queuing = 2; +} + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? +message LimitedPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // + // If not specified, this field defaults to a value of 30. + // + // Setting this field to zero supports the construction of a + // "jail" for this priority level that is used to hold some request(s) + // + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `limitResponse` indicates what to do with requests that can not be executed right now + optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. 
+ // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + optional int32 borrowingLimitPercent = 4; +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +message NonResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + repeated string nonResourceURLs = 6; +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +message PolicyRulesWithSubjects { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + repeated Subject subjects = 1; + + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. + // +listType=atomic + // +optional + repeated ResourcePolicyRule resourceRules = 2; + + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + repeated NonResourcePolicyRule nonResourceRules = 3; +} + +// PriorityLevelConfiguration represents the configuration of a priority level. +message PriorityLevelConfiguration { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationSpec spec = 2; + + // `status` is the current status of a "request-priority". 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PriorityLevelConfigurationStatus status = 3; +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. +message PriorityLevelConfigurationCondition { + // `type` is the type of the condition. + // Required. + optional string type = 1; + + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + optional string status = 2; + + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + optional string reason = 4; + + // `message` is a human-readable message indicating details about last transition. + optional string message = 5; +} + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +message PriorityLevelConfigurationList { + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // `items` is a list of request-priorities. + repeated PriorityLevelConfiguration items = 2; +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +message PriorityLevelConfigurationReference { + // `name` is the name of the priority level configuration being referenced + // Required. + optional string name = 1; +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +message PriorityLevelConfigurationSpec { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + optional string type = 1; + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; +} + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +message PriorityLevelConfigurationStatus { + // `conditions` is the current state of "request-priority". 
+ // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + repeated PriorityLevelConfigurationCondition conditions = 1; +} + +// QueuingConfiguration holds the configuration parameters for queuing +message QueuingConfiguration { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + optional int32 queues = 1; + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + optional int32 handSize = 2; + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + optional int32 queueLengthLimit = 3; +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and least one member +// of namespaces matches the request's namespace. +message ResourcePolicyRule { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required. + repeated string verbs = 1; + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + repeated string apiGroups = 2; + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + repeated string resources = 3; + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + optional bool clusterScope = 4; + + // `namespaces` is a list of target namespaces that restricts + // matches. 
A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + repeated string namespaces = 5; +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +message ServiceAccountSubject { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + optional string namespace = 1; + + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + optional string name = 2; +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator; by user, group, or service account. +// +union +message Subject { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + optional string kind = 1; + + // `user` matches based on username. + // +optional + optional UserSubject user = 2; + + // `group` matches based on user group name. + // +optional + optional GroupSubject group = 3; + + // `serviceAccount` matches ServiceAccounts. + // +optional + optional ServiceAccountSubject serviceAccount = 4; +} + +// UserSubject holds detailed information for user-kind subject. +message UserSubject { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + optional string name = 1; +} + diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go b/vendor/k8s.io/api/flowcontrol/v1/register.go similarity index 95% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/register.go rename to vendor/k8s.io/api/flowcontrol/v1/register.go index 0c8a9cc56..02725b514 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go +++ b/vendor/k8s.io/api/flowcontrol/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "flowcontrol.apiserver.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1/types.go similarity index 75% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/types.go rename to vendor/k8s.io/api/flowcontrol/v1/types.go index c52c9aa7b..ad72bcee2 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1/types.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -57,13 +57,56 @@ const ( ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" ) +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". + // + // The kube-apiserver will apply updates on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. + AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". 
@@ -84,10 +127,7 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -314,8 +354,10 @@ type FlowSchemaStatus struct { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []FlowSchemaCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } // FlowSchemaCondition describes conditions for a FlowSchema. @@ -341,10 +383,7 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -364,10 +403,7 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.18 -// +k8s:prerelease-lifecycle-gen:deprecated=1.20 -// +k8s:prerelease-lifecycle-gen:removed=1.21 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -399,6 +435,14 @@ type PriorityLevelConfigurationSpec struct { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` } // PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level @@ -418,26 +462,97 @@ const ( // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { - // `assuredConcurrencyShares` (ACS) configures the execution - // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must - // be a positive number. The server's concurrency limit (SCL) is - // divided among the concurrency-controlled priority levels in - // proportion to their assured concurrency shares. 
This produces - // the assured concurrency value (ACV) --- the number of requests - // that may be executing at a time --- for each such priority - // level: + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) // - // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // + // If not specified, this field defaults to a value of 30. + // + // Setting this field to zero supports the construction of a + // "jail" for this priority level that is used to hold some request(s) // - // bigger numbers of ACS mean more reserved concurrent requests (at the - // expense of every other PL). - // This field has a default value of 30. // +optional - AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. 
+// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. } // LimitResponse defines how to handle requests that can not be executed right now. @@ -512,8 +627,10 @@ type PriorityLevelConfigurationStatus struct { // `conditions` is the current state of "request-priority". // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional - Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } // PriorityLevelConfigurationCondition defines the condition of priority level. diff --git a/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..b8cb43636 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go @@ -0,0 +1,274 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + +var map_FlowDistinguisherMethod = map[string]string{ + "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", +} + +func (FlowDistinguisherMethod) SwaggerDoc() map[string]string { + return map_FlowDistinguisherMethod +} + +var map_FlowSchema = map[string]string{ + "": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a FlowSchema. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (FlowSchema) SwaggerDoc() map[string]string { + return map_FlowSchema +} + +var map_FlowSchemaCondition = map[string]string{ + "": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (FlowSchemaCondition) SwaggerDoc() map[string]string { + return map_FlowSchemaCondition +} + +var map_FlowSchemaList = map[string]string{ + "": "FlowSchemaList is a list of FlowSchema objects.", + "metadata": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of FlowSchemas.", +} + +func (FlowSchemaList) SwaggerDoc() map[string]string { + return map_FlowSchemaList +} + +var map_FlowSchemaSpec = map[string]string{ + "": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "matchingPrecedence": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "distinguisherMethod": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "rules": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", +} + +func (FlowSchemaSpec) SwaggerDoc() map[string]string { + return map_FlowSchemaSpec +} + +var map_FlowSchemaStatus = map[string]string{ + "": "FlowSchemaStatus represents the current state of a FlowSchema.", + "conditions": "`conditions` is a list of the current states of FlowSchema.", +} + +func (FlowSchemaStatus) SwaggerDoc() map[string]string { + return map_FlowSchemaStatus +} + +var map_GroupSubject = map[string]string{ + "": "GroupSubject holds detailed information for group-kind subject.", + "name": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", +} + +func (GroupSubject) SwaggerDoc() map[string]string { + return map_GroupSubject +} + +var map_LimitResponse = map[string]string{ + "": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "`type` is \"Queue\" or \"Reject\". 
\"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "queuing": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", +} + +func (LimitResponse) SwaggerDoc() map[string]string { + return map_LimitResponse +} + +var map_LimitedPriorityLevelConfiguration = map[string]string{ + "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\n\nIf not specified, this field defaults to a value of 30.\n\nSetting this field to zero supports the construction of a \"jail\" for this priority level that is used to hold some request(s)", + "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", +} + +func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_LimitedPriorityLevelConfiguration +} + +var map_NonResourcePolicyRule = map[string]string{ + "": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. 
A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "nonResourceURLs": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", +} + +func (NonResourcePolicyRule) SwaggerDoc() map[string]string { + return map_NonResourcePolicyRule +} + +var map_PolicyRulesWithSubjects = map[string]string{ + "": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "subjects": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "resourceRules": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "nonResourceRules": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", +} + +func (PolicyRulesWithSubjects) SwaggerDoc() map[string]string { + return map_PolicyRulesWithSubjects +} + +var map_PriorityLevelConfiguration = map[string]string{ + "": "PriorityLevelConfiguration represents the configuration of a priority level.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_PriorityLevelConfiguration +} + +var map_PriorityLevelConfigurationCondition = map[string]string{ + "": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "`type` is the type of the condition. Required.", + "status": "`status` is the status of the condition. Can be True, False, Unknown. 
Required.", + "lastTransitionTime": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "reason": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "`message` is a human-readable message indicating details about last transition.", +} + +func (PriorityLevelConfigurationCondition) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationCondition +} + +var map_PriorityLevelConfigurationList = map[string]string{ + "": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "metadata": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "`items` is a list of request-priorities.", +} + +func (PriorityLevelConfigurationList) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationList +} + +var map_PriorityLevelConfigurationReference = map[string]string{ + "": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "name": "`name` is the name of the priority level configuration being referenced Required.", +} + +func (PriorityLevelConfigurationReference) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationReference +} + +var map_PriorityLevelConfigurationSpec = map[string]string{ + "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", +} + +func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationSpec +} + +var map_PriorityLevelConfigurationStatus = map[string]string{ + "": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "conditions": "`conditions` is the current state of \"request-priority\".", +} + +func (PriorityLevelConfigurationStatus) SwaggerDoc() map[string]string { + return map_PriorityLevelConfigurationStatus +} + +var map_QueuingConfiguration = map[string]string{ + "": "QueuingConfiguration holds the configuration parameters for queuing", + "queues": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. 
This field has a default value of 64.", + "handSize": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "queueLengthLimit": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", +} + +func (QueuingConfiguration) SwaggerDoc() map[string]string { + return map_QueuingConfiguration +} + +var map_ResourcePolicyRule = map[string]string{ + "": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", + "verbs": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "apiGroups": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "resources": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "clusterScope": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "namespaces": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", +} + +func (ResourcePolicyRule) SwaggerDoc() map[string]string { + return map_ResourcePolicyRule +} + +var map_ServiceAccountSubject = map[string]string{ + "": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "namespace": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "name": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. 
Required.", +} + +func (ServiceAccountSubject) SwaggerDoc() map[string]string { + return map_ServiceAccountSubject +} + +var map_Subject = map[string]string{ + "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "kind": "`kind` indicates which one of the other fields is non-empty. Required", + "user": "`user` matches based on username.", + "group": "`group` matches based on user group name.", + "serviceAccount": "`serviceAccount` matches ServiceAccounts.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +var map_UserSubject = map[string]string{ + "": "UserSubject holds detailed information for user-kind subject.", + "name": "`name` is the username that matches, or \"*\" to match all usernames. Required.", +} + +func (UserSubject) SwaggerDoc() map[string]string { + return map_UserSubject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..f37090b75 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go @@ -0,0 +1,588 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. +func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowDistinguisherMethod. +func (in *FlowDistinguisherMethod) DeepCopy() *FlowDistinguisherMethod { + if in == nil { + return nil + } + out := new(FlowDistinguisherMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *FlowSchema) DeepCopyInto(out *FlowSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchema. +func (in *FlowSchema) DeepCopy() *FlowSchema { + if in == nil { + return nil + } + out := new(FlowSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaCondition) DeepCopyInto(out *FlowSchemaCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaCondition. +func (in *FlowSchemaCondition) DeepCopy() *FlowSchemaCondition { + if in == nil { + return nil + } + out := new(FlowSchemaCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaList) DeepCopyInto(out *FlowSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FlowSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaList. +func (in *FlowSchemaList) DeepCopy() *FlowSchemaList { + if in == nil { + return nil + } + out := new(FlowSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FlowSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlowSchemaSpec) DeepCopyInto(out *FlowSchemaSpec) { + *out = *in + out.PriorityLevelConfiguration = in.PriorityLevelConfiguration + if in.DistinguisherMethod != nil { + in, out := &in.DistinguisherMethod, &out.DistinguisherMethod + *out = new(FlowDistinguisherMethod) + **out = **in + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRulesWithSubjects, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaSpec. +func (in *FlowSchemaSpec) DeepCopy() *FlowSchemaSpec { + if in == nil { + return nil + } + out := new(FlowSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlowSchemaStatus) DeepCopyInto(out *FlowSchemaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]FlowSchemaCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowSchemaStatus. +func (in *FlowSchemaStatus) DeepCopy() *FlowSchemaStatus { + if in == nil { + return nil + } + out := new(FlowSchemaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupSubject) DeepCopyInto(out *GroupSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupSubject. +func (in *GroupSubject) DeepCopy() *GroupSubject { + if in == nil { + return nil + } + out := new(GroupSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitResponse) DeepCopyInto(out *LimitResponse) { + *out = *in + if in.Queuing != nil { + in, out := &in.Queuing, &out.Queuing + *out = new(QueuingConfiguration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitResponse. +func (in *LimitResponse) DeepCopy() *LimitResponse { + if in == nil { + return nil + } + out := new(LimitResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitedPriorityLevelConfiguration. +func (in *LimitedPriorityLevelConfiguration) DeepCopy() *LimitedPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(LimitedPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourcePolicyRule) DeepCopyInto(out *NonResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourcePolicyRule. +func (in *NonResourcePolicyRule) DeepCopy() *NonResourcePolicyRule { + if in == nil { + return nil + } + out := new(NonResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRulesWithSubjects) DeepCopyInto(out *PolicyRulesWithSubjects) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourcePolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRulesWithSubjects. +func (in *PolicyRulesWithSubjects) DeepCopy() *PolicyRulesWithSubjects { + if in == nil { + return nil + } + out := new(PolicyRulesWithSubjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfiguration) DeepCopyInto(out *PriorityLevelConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfiguration. +func (in *PriorityLevelConfiguration) DeepCopy() *PriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(PriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityLevelConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationCondition) DeepCopyInto(out *PriorityLevelConfigurationCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationCondition. +func (in *PriorityLevelConfigurationCondition) DeepCopy() *PriorityLevelConfigurationCondition { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationList) DeepCopyInto(out *PriorityLevelConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityLevelConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationList. +func (in *PriorityLevelConfigurationList) DeepCopy() *PriorityLevelConfigurationList { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PriorityLevelConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationReference) DeepCopyInto(out *PriorityLevelConfigurationReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationReference. +func (in *PriorityLevelConfigurationReference) DeepCopy() *PriorityLevelConfigurationReference { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigurationSpec) { + *out = *in + if in.Limited != nil { + in, out := &in.Limited, &out.Limited + *out = new(LimitedPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationSpec. +func (in *PriorityLevelConfigurationSpec) DeepCopy() *PriorityLevelConfigurationSpec { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityLevelConfigurationStatus) DeepCopyInto(out *PriorityLevelConfigurationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PriorityLevelConfigurationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityLevelConfigurationStatus. +func (in *PriorityLevelConfigurationStatus) DeepCopy() *PriorityLevelConfigurationStatus { + if in == nil { + return nil + } + out := new(PriorityLevelConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueuingConfiguration) DeepCopyInto(out *QueuingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueuingConfiguration. +func (in *QueuingConfiguration) DeepCopy() *QueuingConfiguration { + if in == nil { + return nil + } + out := new(QueuingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourcePolicyRule) DeepCopyInto(out *ResourcePolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePolicyRule. +func (in *ResourcePolicyRule) DeepCopy() *ResourcePolicyRule { + if in == nil { + return nil + } + out := new(ResourcePolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountSubject) DeepCopyInto(out *ServiceAccountSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountSubject. +func (in *ServiceAccountSubject) DeepCopy() *ServiceAccountSubject { + if in == nil { + return nil + } + out := new(ServiceAccountSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + if in.User != nil { + in, out := &in.User, &out.User + *out = new(UserSubject) + **out = **in + } + if in.Group != nil { + in, out := &in.Group, &out.Group + *out = new(GroupSubject) + **out = **in + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSubject) DeepCopyInto(out *UserSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSubject. +func (in *UserSubject) DeepCopy() *UserSubject { + if in == nil { + return nil + } + out := new(UserSubject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..fbab9868c --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go index cb06fe5e7..96e368f6f 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +// source: k8s.io/api/flowcontrol/v1beta1/generated.proto package v1beta1 @@ -43,10 +43,38 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_3a5cb22a034fcb2a, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{0} + return fileDescriptor_3a5cb22a034fcb2a, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{1} + return fileDescriptor_3a5cb22a034fcb2a, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{2} + return fileDescriptor_3a5cb22a034fcb2a, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{3} + return fileDescriptor_3a5cb22a034fcb2a, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{4} + return fileDescriptor_3a5cb22a034fcb2a, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{5} + return fileDescriptor_3a5cb22a034fcb2a, []int{6} } func 
(m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{6} + return fileDescriptor_3a5cb22a034fcb2a, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{7} + return fileDescriptor_3a5cb22a034fcb2a, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{8} + return fileDescriptor_3a5cb22a034fcb2a, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{9} + return fileDescriptor_3a5cb22a034fcb2a, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{10} + return fileDescriptor_3a5cb22a034fcb2a, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{11} + return fileDescriptor_3a5cb22a034fcb2a, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{12} + return fileDescriptor_3a5cb22a034fcb2a, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = 
PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{13} + return fileDescriptor_3a5cb22a034fcb2a, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{14} + return fileDescriptor_3a5cb22a034fcb2a, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{15} + return fileDescriptor_3a5cb22a034fcb2a, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{16} + return fileDescriptor_3a5cb22a034fcb2a, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{17} + return fileDescriptor_3a5cb22a034fcb2a, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{18} + return fileDescriptor_3a5cb22a034fcb2a, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{19} + return fileDescriptor_3a5cb22a034fcb2a, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return 
fileDescriptor_80171c2a4e3669de, []int{20} + return fileDescriptor_3a5cb22a034fcb2a, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_80171c2a4e3669de, []int{21} + return fileDescriptor_3a5cb22a034fcb2a, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration") proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowDistinguisherMethod") proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchema") proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaCondition") @@ -685,105 +714,144 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto", fileDescriptor_80171c2a4e3669de) -} - -var fileDescriptor_80171c2a4e3669de = []byte{ - // 1494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcb, 0x73, 0xdb, 0x44, - 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, - 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, - 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, - 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, - 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, - 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 0x19, 0xae, 0xee, 0x5f, 0xa6, 0x9a, 0xe5, - 0x94, 0xf7, 0xfd, 0x0a, 0xf1, 0x6c, 0xc2, 0x08, 0x2d, 0xb7, 0x88, 0x5d, 0x75, 0xbc, 0xb2, 0x54, - 0x18, 0xae, 0x55, 0xde, 0x6b, 0x38, 0x07, 0xa6, 0x63, 0x33, 0xcf, 0x69, 0x94, 0x5b, 0xe7, 0x2a, - 0x84, 0x19, 0xe7, 0xca, 0x35, 0x62, 0x13, 0xcf, 0x60, 0xa4, 0xaa, 0xb9, 0x9e, 0xc3, 0x1c, 0x54, - 0x0c, 0xec, 0x35, 0xc3, 0xb5, 0xb4, 0x2e, 0x7b, 0x4d, 0xda, 0xaf, 0x9e, 0xa9, 0x59, 0xac, 0xee, - 0x57, 0x34, 0xd3, 0x69, 0x96, 0x6b, 0x4e, 0xcd, 0x29, 0x0b, 0xb7, 0x8a, 0xbf, 0x27, 0xbe, 0xc4, - 0x87, 0xf8, 0x15, 0xc0, 0xad, 0x5e, 0x88, 0xc3, 0x37, 0x0d, 0xb3, 0x6e, 0xd9, 0xc4, 0x3b, 0x2c, - 0xbb, 0xfb, 0x35, 0x2e, 0xa0, 0xe5, 0x26, 0x61, 0x46, 0xb9, 0xd5, 0x97, 0xc4, 0x6a, 0x79, 0x98, - 0x97, 0xe7, 0xdb, 0xcc, 0x6a, 0x92, 0x3e, 0x87, 0xb7, 0xd2, 0x1c, 0xa8, 0x59, 0x27, 0x4d, 0xa3, - 0xd7, 0x4f, 0xbd, 0x03, 0x2b, 0x1f, 0x37, 0x9c, 0x83, 0x2b, 0x16, 0x65, 0x96, 0x5d, 0xf3, 0x2d, - 0x5a, 0x27, 0xde, 0x16, 0x61, 0x75, 0xa7, 0x8a, 0x3e, 0x80, 0x2c, 0x3b, 0x74, 0x49, 0x41, 0x59, - 0x53, 0x5e, 0xcb, 0xeb, 0xa7, 0x1e, 0xb5, 0x4b, 0x13, 0x9d, 0x76, 0x29, 0x7b, 0xeb, 0xd0, 0x25, - 0xcf, 0xda, 0xa5, 0xe3, 0x43, 0xdc, 0xb8, 0x1a, 0x0b, 0x47, 0xf5, 0xfb, 0x0c, 0x00, 0xb7, 0xda, - 0x15, 0xa1, 0xd1, 0x3d, 0x98, 0xe1, 0xe5, 0x56, 0x0d, 0x66, 0x08, 0xcc, 0xd9, 0xf3, 0x67, 0xb5, - 0xb8, 0xd7, 0x51, 0xd6, 0x9a, 0xbb, 0x5f, 0xe3, 0x02, 0xaa, 0x71, 0x6b, 0xad, 0x75, 0x4e, 0xbb, - 0x51, 0xb9, 0x4f, 0x4c, 0xb6, 0x45, 0x98, 0xa1, 0x23, 0x99, 0x05, 0xc4, 0x32, 
0x1c, 0xa1, 0xa2, - 0x1d, 0xc8, 0x52, 0x97, 0x98, 0x85, 0x8c, 0x40, 0xd7, 0xb4, 0xd1, 0x27, 0xa9, 0xc5, 0xb9, 0xed, - 0xba, 0xc4, 0xd4, 0xe7, 0xc2, 0x0a, 0xf9, 0x17, 0x16, 0x48, 0xe8, 0x0b, 0x98, 0xa2, 0xcc, 0x60, - 0x3e, 0x2d, 0x4c, 0xf6, 0x65, 0x9c, 0x86, 0x29, 0xfc, 0xf4, 0x05, 0x89, 0x3a, 0x15, 0x7c, 0x63, - 0x89, 0xa7, 0x3e, 0xc9, 0xc0, 0x72, 0x6c, 0xbc, 0xe1, 0xd8, 0x55, 0x8b, 0x59, 0x8e, 0x8d, 0xde, - 0x4d, 0x74, 0xfd, 0x64, 0x4f, 0xd7, 0x57, 0x06, 0xb8, 0xc4, 0x1d, 0x47, 0x6f, 0x47, 0xe9, 0x66, - 0x84, 0xfb, 0x89, 0x64, 0xf0, 0x67, 0xed, 0xd2, 0x62, 0xe4, 0x96, 0xcc, 0x07, 0xb5, 0x00, 0x35, - 0x0c, 0xca, 0x6e, 0x79, 0x86, 0x4d, 0x03, 0x58, 0xab, 0x49, 0x64, 0xd5, 0x6f, 0x8c, 0x77, 0x4e, - 0xdc, 0x43, 0x5f, 0x95, 0x21, 0xd1, 0x66, 0x1f, 0x1a, 0x1e, 0x10, 0x01, 0xbd, 0x0a, 0x53, 0x1e, - 0x31, 0xa8, 0x63, 0x17, 0xb2, 0x22, 0xe5, 0xa8, 0x5f, 0x58, 0x48, 0xb1, 0xd4, 0xa2, 0xd7, 0x61, - 0xba, 0x49, 0x28, 0x35, 0x6a, 0xa4, 0x90, 0x13, 0x86, 0x8b, 0xd2, 0x70, 0x7a, 0x2b, 0x10, 0xe3, - 0x50, 0xaf, 0xfe, 0xa2, 0xc0, 0x42, 0xdc, 0xa7, 0x4d, 0x8b, 0x32, 0x74, 0xb7, 0x8f, 0x7b, 0xda, - 0x78, 0x35, 0x71, 0x6f, 0xc1, 0xbc, 0x25, 0x19, 0x6e, 0x26, 0x94, 0x74, 0xf1, 0xee, 0x06, 0xe4, - 0x2c, 0x46, 0x9a, 0xbc, 0xeb, 0x93, 0x3d, 0xed, 0x4a, 0x21, 0x89, 0x3e, 0x2f, 0x61, 0x73, 0xd7, - 0x38, 0x00, 0x0e, 0x70, 0xd4, 0xbf, 0x26, 0xbb, 0x2b, 0xe0, 0x7c, 0x44, 0x3f, 0x29, 0xb0, 0xea, - 0x7a, 0x96, 0xe3, 0x59, 0xec, 0x70, 0x93, 0xb4, 0x48, 0x63, 0xc3, 0xb1, 0xf7, 0xac, 0x9a, 0xef, - 0x19, 0xbc, 0x95, 0xb2, 0xa8, 0x8d, 0xb4, 0xc8, 0x3b, 0x43, 0x11, 0x30, 0xd9, 0x23, 0x1e, 0xb1, - 0x4d, 0xa2, 0xab, 0x32, 0xa5, 0xd5, 0x11, 0xc6, 0x23, 0x52, 0x41, 0x9f, 0x02, 0x6a, 0x1a, 0x8c, - 0x77, 0xb4, 0xb6, 0xe3, 0x11, 0x93, 0x54, 0x39, 0xaa, 0x20, 0x64, 0x2e, 0x66, 0xc7, 0x56, 0x9f, - 0x05, 0x1e, 0xe0, 0x85, 0xbe, 0x55, 0x60, 0xb9, 0xda, 0x3f, 0x64, 0x24, 0x2f, 0x2f, 0x8d, 0xd3, - 0xe8, 0x01, 0x33, 0x4a, 0x5f, 0xe9, 0xb4, 0x4b, 0xcb, 0x03, 0x14, 0x78, 0x50, 0x30, 0x74, 0x17, - 0x72, 0x9e, 0xdf, 0x20, 0xb4, 0x90, 0x15, 0xc7, 0x9b, 0x1a, 0x75, 0xc7, 0x69, 0x58, 0xe6, 0x21, - 0xe6, 0x2e, 0x9f, 0x5b, 0xac, 0xbe, 0xeb, 0x8b, 0x59, 0x45, 0xe3, 0xb3, 0x16, 0x2a, 0x1c, 0x80, - 0xaa, 0x0f, 0x61, 0xa9, 0x77, 0x68, 0xa0, 0x1a, 0x80, 0x19, 0xde, 0x53, 0x5a, 0x50, 0x44, 0xd8, - 0x37, 0xc7, 0x67, 0x55, 0x74, 0xc7, 0xe3, 0x79, 0x19, 0x89, 0x28, 0xee, 0x82, 0x56, 0xcf, 0xc2, - 0xdc, 0x55, 0xcf, 0xf1, 0x5d, 0x99, 0x23, 0x5a, 0x83, 0xac, 0x6d, 0x34, 0xc3, 0xe9, 0x13, 0x4d, - 0xc4, 0x6d, 0xa3, 0x49, 0xb0, 0xd0, 0xa8, 0x3f, 0x2a, 0x30, 0xbf, 0x69, 0x35, 0x2d, 0x86, 0x09, - 0x75, 0x1d, 0x9b, 0x12, 0x74, 0x31, 0x31, 0xb1, 0x4e, 0xf4, 0x4c, 0xac, 0x23, 0x09, 0xe3, 0xae, - 0x59, 0xf5, 0x25, 0x4c, 0x3f, 0xf0, 0x89, 0x6f, 0xd9, 0x35, 0x39, 0xaf, 0x2f, 0xa4, 0x15, 0x78, - 0x33, 0x30, 0x4f, 0xb0, 0x4d, 0x9f, 0xe5, 0x23, 0x40, 0x6a, 0x70, 0x88, 0xa8, 0xfe, 0xad, 0xc0, - 0x09, 0x11, 0x98, 0x54, 0x87, 0xb3, 0x18, 0xdd, 0x85, 0x82, 0x41, 0xa9, 0xef, 0x91, 0xea, 0x86, - 0x63, 0x9b, 0xbe, 0xc7, 0xf9, 0x7f, 0xb8, 0x5b, 0x37, 0x3c, 0x42, 0x45, 0x35, 0x39, 0x7d, 0x4d, - 0x56, 0x53, 0x58, 0x1f, 0x62, 0x87, 0x87, 0x22, 0xa0, 0xfb, 0x30, 0xdf, 0xe8, 0xae, 0x5d, 0x96, - 0x79, 0x26, 0xad, 0xcc, 0x44, 0xc3, 0xf4, 0x63, 0x32, 0x83, 0x64, 0xd3, 0x71, 0x12, 0x5a, 0x3d, - 0x80, 0x63, 0xdb, 0xfc, 0x0e, 0x53, 0xc7, 0xf7, 0x4c, 0x12, 0x13, 0x10, 0x95, 0x20, 0xd7, 0x22, - 0x5e, 0x25, 0x20, 0x51, 0x5e, 0xcf, 0x73, 0xfa, 0x7d, 0xc6, 0x05, 0x38, 0x90, 0xa3, 0xf7, 0x60, - 0xd1, 0x8e, 0x3d, 0x6f, 0xe3, 0x4d, 0x5a, 0x98, 0x12, 0xa6, 0xcb, 0x9d, 0x76, 0x69, 0x71, 0x3b, - 0xa9, 
0xc2, 0xbd, 0xb6, 0x6a, 0x3b, 0x03, 0x2b, 0x43, 0xf8, 0x8e, 0x6e, 0xc3, 0x0c, 0x95, 0xbf, - 0x25, 0x87, 0x4f, 0xa6, 0xd5, 0x2e, 0x7d, 0xe3, 0x69, 0x1b, 0x82, 0xe1, 0x08, 0x0a, 0x39, 0x30, - 0xef, 0xc9, 0x14, 0x44, 0x4c, 0x39, 0x75, 0xcf, 0xa7, 0x61, 0xf7, 0x77, 0x27, 0x6e, 0x2e, 0xee, - 0x06, 0xc4, 0x49, 0x7c, 0xf4, 0x10, 0x96, 0xba, 0xca, 0x0e, 0x62, 0x4e, 0x8a, 0x98, 0x17, 0xd3, - 0x62, 0x0e, 0x3c, 0x14, 0xbd, 0x20, 0xc3, 0x2e, 0x6d, 0xf7, 0xc0, 0xe2, 0xbe, 0x40, 0xea, 0x6f, - 0x19, 0x18, 0x31, 0x88, 0x5f, 0xc2, 0x52, 0x75, 0x2f, 0xb1, 0x54, 0xbd, 0xff, 0xfc, 0x2f, 0xcc, - 0xd0, 0x25, 0xab, 0xde, 0xb3, 0x64, 0x7d, 0xf8, 0x02, 0x31, 0x46, 0x2f, 0x5d, 0xff, 0x64, 0xe0, - 0x95, 0xe1, 0xce, 0xf1, 0x12, 0x76, 0x3d, 0x31, 0xd2, 0x2e, 0xf5, 0x8c, 0xb4, 0x93, 0x63, 0x40, - 0xfc, 0xbf, 0x94, 0xf5, 0x2c, 0x65, 0xbf, 0x2b, 0x50, 0x1c, 0xde, 0xb7, 0x97, 0xb0, 0xa4, 0x7d, - 0x95, 0x5c, 0xd2, 0xde, 0x79, 0x7e, 0x92, 0x0d, 0x59, 0xda, 0xae, 0x8e, 0xe2, 0x56, 0xb4, 0x5e, - 0x8d, 0xf1, 0xc4, 0xfe, 0x3a, 0xb2, 0x55, 0x62, 0x1b, 0x4c, 0xf9, 0x97, 0x90, 0xf0, 0xfe, 0xc8, - 0x36, 0x2a, 0x0d, 0xd2, 0x24, 0x36, 0x93, 0x84, 0xac, 0xc3, 0x74, 0x23, 0x78, 0x1b, 0xe5, 0xa5, - 0x5e, 0x1f, 0xeb, 0x49, 0x1a, 0xf5, 0x94, 0x06, 0xcf, 0xb0, 0x34, 0xc3, 0x21, 0xbc, 0xfa, 0x83, - 0x02, 0x6b, 0x69, 0x97, 0x15, 0x1d, 0x0c, 0x58, 0x76, 0x5e, 0x60, 0x91, 0x1d, 0x7f, 0xf9, 0xf9, - 0x59, 0x81, 0xa3, 0x83, 0x76, 0x0a, 0x4e, 0x7f, 0xbe, 0x48, 0x44, 0x5b, 0x40, 0x44, 0xff, 0x9b, - 0x42, 0x8a, 0xa5, 0x16, 0x9d, 0x86, 0x99, 0xba, 0x61, 0x57, 0x77, 0xad, 0xaf, 0xc3, 0xfd, 0x36, - 0x22, 0xe0, 0x27, 0x52, 0x8e, 0x23, 0x0b, 0x74, 0x05, 0x96, 0x84, 0xdf, 0x26, 0xb1, 0x6b, 0xac, - 0x2e, 0x7a, 0x25, 0xae, 0x72, 0x2e, 0x7e, 0x0f, 0x6e, 0xf6, 0xe8, 0x71, 0x9f, 0x87, 0xfa, 0xaf, - 0x02, 0xe8, 0x79, 0xde, 0xf9, 0x53, 0x90, 0x37, 0x5c, 0x4b, 0x2c, 0x7b, 0xc1, 0x15, 0xc8, 0xeb, - 0xf3, 0x9d, 0x76, 0x29, 0xbf, 0xbe, 0x73, 0x2d, 0x10, 0xe2, 0x58, 0xcf, 0x8d, 0xc3, 0x27, 0x30, - 0x78, 0xea, 0xa4, 0x71, 0x18, 0x98, 0xe2, 0x58, 0x8f, 0x2e, 0xc3, 0x9c, 0xd9, 0xf0, 0x29, 0x23, - 0xde, 0xae, 0xe9, 0xb8, 0x44, 0x8c, 0x8c, 0x19, 0xfd, 0xa8, 0xac, 0x69, 0x6e, 0xa3, 0x4b, 0x87, - 0x13, 0x96, 0x48, 0x03, 0xe0, 0x84, 0xa7, 0xae, 0xc1, 0xe3, 0xe4, 0x44, 0x9c, 0x05, 0x7e, 0x60, - 0xdb, 0x91, 0x14, 0x77, 0x59, 0xa8, 0xf7, 0xe1, 0xd8, 0x2e, 0xf1, 0x5a, 0x96, 0x49, 0xd6, 0x4d, - 0xd3, 0xf1, 0x6d, 0x16, 0xae, 0xad, 0x65, 0xc8, 0x47, 0x66, 0xf2, 0x4e, 0x1c, 0x91, 0xf1, 0xf3, - 0x11, 0x16, 0x8e, 0x6d, 0xa2, 0x4b, 0x98, 0x19, 0x7e, 0x09, 0x33, 0x30, 0x1d, 0xc3, 0x67, 0xf7, - 0x2d, 0xbb, 0x2a, 0x91, 0x8f, 0x87, 0xd6, 0xd7, 0x2d, 0xbb, 0xfa, 0xac, 0x5d, 0x9a, 0x95, 0x66, - 0xfc, 0x13, 0x0b, 0x43, 0x74, 0x0d, 0xb2, 0x3e, 0x25, 0x9e, 0xbc, 0x5e, 0xa7, 0xd2, 0xc8, 0x7c, - 0x9b, 0x12, 0x2f, 0xdc, 0x7c, 0x66, 0x38, 0x32, 0x17, 0x60, 0x01, 0x81, 0xb6, 0x20, 0x57, 0xe3, - 0x87, 0x22, 0xa7, 0xfe, 0xe9, 0x34, 0xac, 0xee, 0x75, 0x3e, 0xa0, 0x81, 0x90, 0xe0, 0x00, 0x05, - 0x3d, 0x80, 0x05, 0x9a, 0x68, 0xa1, 0x38, 0xae, 0x31, 0x36, 0x99, 0x81, 0x8d, 0xd7, 0x51, 0xa7, - 0x5d, 0x5a, 0x48, 0xaa, 0x70, 0x4f, 0x00, 0xb5, 0x0c, 0xb3, 0x5d, 0x05, 0xa6, 0xcf, 0x3f, 0xfd, - 0xcc, 0xa3, 0xa7, 0xc5, 0x89, 0xc7, 0x4f, 0x8b, 0x13, 0x4f, 0x9e, 0x16, 0x27, 0xbe, 0xe9, 0x14, - 0x95, 0x47, 0x9d, 0xa2, 0xf2, 0xb8, 0x53, 0x54, 0x9e, 0x74, 0x8a, 0xca, 0x1f, 0x9d, 0xa2, 0xf2, - 0xdd, 0x9f, 0xc5, 0x89, 0x3b, 0xd3, 0x32, 0xb3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3c, 0xd4, - 0x36, 0xaf, 0xfa, 0x13, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/flowcontrol/v1beta1/generated.proto", 
fileDescriptor_3a5cb22a034fcb2a) +} + +var fileDescriptor_3a5cb22a034fcb2a = []byte{ + // 1599 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x73, 0xdb, 0xc4, + 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x5f, 0x7e, 0x76, 0xd3, 0x4c, 0xfc, 0x4d, 0xbf, 0x63, 0xa7, 0x62, + 0x86, 0x02, 0x6d, 0xe5, 0xb6, 0xb4, 0xb4, 0xc0, 0xf0, 0x23, 0x4a, 0x4b, 0x29, 0x4d, 0xd2, 0x74, + 0xd3, 0x42, 0xa7, 0x74, 0x86, 0x2a, 0xf2, 0xc6, 0x56, 0x63, 0xfd, 0xa8, 0x56, 0x4a, 0x08, 0xbd, + 0x30, 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x13, 0x17, 0xae, 0x1c, 0x38, 0xd2, 0xe1, 0xd4, 0x63, + 0x4f, 0x86, 0x9a, 0x13, 0xff, 0x01, 0x74, 0x86, 0x19, 0x66, 0x57, 0x2b, 0xc9, 0xb2, 0x2d, 0xcb, + 0xd3, 0xce, 0xf4, 0xc4, 0x2d, 0x7a, 0xfb, 0x79, 0x9f, 0xb7, 0xef, 0xed, 0xfb, 0xe5, 0x80, 0xb2, + 0x7b, 0x81, 0x2a, 0x86, 0x5d, 0xd5, 0x1c, 0xa3, 0xba, 0xd3, 0xb4, 0xf7, 0x75, 0xdb, 0xf2, 0x5c, + 0xbb, 0x59, 0xdd, 0x3b, 0xbd, 0x4d, 0x3c, 0xed, 0x74, 0xb5, 0x4e, 0x2c, 0xe2, 0x6a, 0x1e, 0xa9, + 0x29, 0x8e, 0x6b, 0x7b, 0x36, 0x2a, 0x07, 0x78, 0x45, 0x73, 0x0c, 0xa5, 0x03, 0xaf, 0x08, 0xfc, + 0xd2, 0xc9, 0xba, 0xe1, 0x35, 0xfc, 0x6d, 0x45, 0xb7, 0xcd, 0x6a, 0xdd, 0xae, 0xdb, 0x55, 0xae, + 0xb6, 0xed, 0xef, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x0a, 0xe8, 0x96, 0xce, 0xc6, 0xe6, 0x4d, 0x4d, + 0x6f, 0x18, 0x16, 0x71, 0x0f, 0xaa, 0xce, 0x6e, 0x9d, 0x09, 0x68, 0xd5, 0x24, 0x9e, 0x56, 0xdd, + 0xeb, 0xb9, 0xc4, 0x52, 0x35, 0x4d, 0xcb, 0xf5, 0x2d, 0xcf, 0x30, 0x49, 0x8f, 0xc2, 0x1b, 0x59, + 0x0a, 0x54, 0x6f, 0x10, 0x53, 0xeb, 0xd6, 0x93, 0x7f, 0x92, 0x60, 0xf9, 0xd2, 0xe7, 0xc4, 0x74, + 0xbc, 0x4d, 0xd7, 0xb0, 0x5d, 0xc3, 0x3b, 0x58, 0x23, 0x7b, 0xa4, 0xb9, 0x6a, 0x5b, 0x3b, 0x46, + 0xdd, 0x77, 0x35, 0xcf, 0xb0, 0x2d, 0x74, 0x0b, 0x4a, 0x96, 0x6d, 0x1a, 0x96, 0xc6, 0xe4, 0xba, + 0xef, 0xba, 0xc4, 0xd2, 0x0f, 0xb6, 0x1a, 0x9a, 0x4b, 0x68, 0x49, 0x5a, 0x96, 0x5e, 0x29, 0xa8, + 0xff, 0x6f, 0xb7, 0x2a, 0xa5, 0x8d, 0x14, 0x0c, 0x4e, 0xd5, 0x46, 0xef, 0xc0, 0x6c, 0x93, 0x58, + 0x35, 0x6d, 0xbb, 0x49, 0x36, 0x89, 0xab, 0x13, 0xcb, 0x2b, 0xe5, 0x38, 0xe1, 0x7c, 0xbb, 0x55, + 0x99, 0x5d, 0x4b, 0x1e, 0xe1, 0x6e, 0xac, 0x7c, 0x1b, 0x16, 0x3f, 0x68, 0xda, 0xfb, 0x17, 0x0d, + 0xea, 0x19, 0x56, 0xdd, 0x37, 0x68, 0x83, 0xb8, 0xeb, 0xc4, 0x6b, 0xd8, 0x35, 0xf4, 0x1e, 0xe4, + 0xbd, 0x03, 0x87, 0xf0, 0xfb, 0x15, 0xd5, 0xe3, 0x0f, 0x5b, 0x95, 0x91, 0x76, 0xab, 0x92, 0xbf, + 0x71, 0xe0, 0x90, 0xa7, 0xad, 0xca, 0x91, 0x14, 0x35, 0x76, 0x8c, 0xb9, 0xa2, 0xfc, 0x4d, 0x0e, + 0x80, 0xa1, 0xb6, 0x78, 0xe0, 0xd0, 0x5d, 0x98, 0x60, 0x8f, 0x55, 0xd3, 0x3c, 0x8d, 0x73, 0x4e, + 0x9e, 0x39, 0xa5, 0xc4, 0x99, 0x12, 0xc5, 0x5c, 0x71, 0x76, 0xeb, 0x4c, 0x40, 0x15, 0x86, 0x56, + 0xf6, 0x4e, 0x2b, 0xd7, 0xb6, 0xef, 0x11, 0xdd, 0x5b, 0x27, 0x9e, 0xa6, 0x22, 0x71, 0x0b, 0x88, + 0x65, 0x38, 0x62, 0x45, 0x9b, 0x90, 0xa7, 0x0e, 0xd1, 0x79, 0x00, 0x26, 0xcf, 0x28, 0xca, 0xe0, + 0x3c, 0x54, 0xe2, 0xbb, 0x6d, 0x39, 0x44, 0x57, 0xa7, 0x42, 0x0f, 0xd9, 0x17, 0xe6, 0x4c, 0xe8, + 0x16, 0x8c, 0x51, 0x4f, 0xf3, 0x7c, 0x5a, 0x1a, 0xed, 0xb9, 0x71, 0x16, 0x27, 0xd7, 0x53, 0x67, + 0x04, 0xeb, 0x58, 0xf0, 0x8d, 0x05, 0x9f, 0xfc, 0x38, 0x07, 0xf3, 0x31, 0x78, 0xd5, 0xb6, 0x6a, + 0x06, 0xcf, 0x94, 0xb7, 0x13, 0x51, 0x3f, 0xd6, 0x15, 0xf5, 0xc5, 0x3e, 0x2a, 0x71, 0xc4, 0xd1, + 0x9b, 0xd1, 0x75, 0x73, 0x5c, 0xfd, 0x68, 0xd2, 0xf8, 0xd3, 0x56, 0x65, 0x36, 0x52, 0x4b, 0xde, + 0x07, 0xed, 0x01, 0x6a, 0x6a, 0xd4, 0xbb, 0xe1, 0x6a, 0x16, 0x0d, 0x68, 0x0d, 0x93, 0x08, 0xaf, + 0x5f, 0x1b, 0xee, 0x9d, 0x98, 0x86, 0xba, 0x24, 0x4c, 0xa2, 0xb5, 0x1e, 0x36, 0xdc, 
0xc7, 0x02, + 0x7a, 0x19, 0xc6, 0x5c, 0xa2, 0x51, 0xdb, 0x2a, 0xe5, 0xf9, 0x95, 0xa3, 0x78, 0x61, 0x2e, 0xc5, + 0xe2, 0x14, 0xbd, 0x0a, 0xe3, 0x26, 0xa1, 0x54, 0xab, 0x93, 0x52, 0x81, 0x03, 0x67, 0x05, 0x70, + 0x7c, 0x3d, 0x10, 0xe3, 0xf0, 0x5c, 0xfe, 0x59, 0x82, 0x99, 0x38, 0x4e, 0x6b, 0x06, 0xf5, 0xd0, + 0x9d, 0x9e, 0xdc, 0x53, 0x86, 0xf3, 0x89, 0x69, 0xf3, 0xcc, 0x9b, 0x13, 0xe6, 0x26, 0x42, 0x49, + 0x47, 0xde, 0x5d, 0x83, 0x82, 0xe1, 0x11, 0x93, 0x45, 0x7d, 0xb4, 0x2b, 0x5c, 0x19, 0x49, 0xa2, + 0x4e, 0x0b, 0xda, 0xc2, 0x15, 0x46, 0x80, 0x03, 0x1e, 0xf9, 0xcf, 0xd1, 0x4e, 0x0f, 0x58, 0x3e, + 0xa2, 0xef, 0x25, 0x58, 0x72, 0x52, 0x1b, 0x8c, 0x70, 0x6a, 0x35, 0xcb, 0x72, 0x7a, 0x8b, 0xc2, + 0x64, 0x87, 0xb0, 0xbe, 0x42, 0x54, 0x59, 0x5c, 0x69, 0x69, 0x00, 0x78, 0xc0, 0x55, 0xd0, 0x47, + 0x80, 0x4c, 0xcd, 0x63, 0x11, 0xad, 0x6f, 0xba, 0x44, 0x27, 0x35, 0xc6, 0x2a, 0x9a, 0x52, 0x94, + 0x1d, 0xeb, 0x3d, 0x08, 0xdc, 0x47, 0x0b, 0x7d, 0x25, 0xc1, 0x7c, 0xad, 0xb7, 0xc9, 0x88, 0xbc, + 0x3c, 0x3f, 0x4c, 0xa0, 0xfb, 0xf4, 0x28, 0x75, 0xb1, 0xdd, 0xaa, 0xcc, 0xf7, 0x39, 0xc0, 0xfd, + 0x8c, 0xa1, 0x3b, 0x50, 0x70, 0xfd, 0x26, 0xa1, 0xa5, 0x3c, 0x7f, 0xde, 0x4c, 0xab, 0x9b, 0x76, + 0xd3, 0xd0, 0x0f, 0x30, 0x53, 0xf9, 0xc4, 0xf0, 0x1a, 0x5b, 0x3e, 0xef, 0x55, 0x34, 0x7e, 0x6b, + 0x7e, 0x84, 0x03, 0x52, 0xf9, 0x01, 0xcc, 0x75, 0x37, 0x0d, 0x54, 0x07, 0xd0, 0xc3, 0x3a, 0x65, + 0x03, 0x82, 0x99, 0x7d, 0x7d, 0xf8, 0xac, 0x8a, 0x6a, 0x3c, 0xee, 0x97, 0x91, 0x88, 0xe2, 0x0e, + 0x6a, 0xf9, 0x14, 0x4c, 0x5d, 0x76, 0x6d, 0xdf, 0x11, 0x77, 0x44, 0xcb, 0x90, 0xb7, 0x34, 0x33, + 0xec, 0x3e, 0x51, 0x47, 0xdc, 0xd0, 0x4c, 0x82, 0xf9, 0x89, 0xfc, 0x9d, 0x04, 0xd3, 0x6b, 0x86, + 0x69, 0x78, 0x98, 0x50, 0xc7, 0xb6, 0x28, 0x41, 0xe7, 0x12, 0x1d, 0xeb, 0x68, 0x57, 0xc7, 0x3a, + 0x94, 0x00, 0x77, 0xf4, 0xaa, 0x4f, 0x61, 0xfc, 0xbe, 0x4f, 0x7c, 0xc3, 0xaa, 0x8b, 0x7e, 0x7d, + 0x36, 0xcb, 0xc1, 0xeb, 0x01, 0x3c, 0x91, 0x6d, 0xea, 0x24, 0x6b, 0x01, 0xe2, 0x04, 0x87, 0x8c, + 0xf2, 0x3f, 0x39, 0x38, 0xca, 0x0d, 0x93, 0xda, 0x80, 0xa9, 0x7c, 0x07, 0x4a, 0x1a, 0xa5, 0xbe, + 0x4b, 0x6a, 0x69, 0x53, 0x79, 0x59, 0x78, 0x53, 0x5a, 0x49, 0xc1, 0xe1, 0x54, 0x06, 0x74, 0x0f, + 0xa6, 0x9b, 0x9d, 0xbe, 0x0b, 0x37, 0x4f, 0x66, 0xb9, 0x99, 0x08, 0x98, 0xba, 0x20, 0x6e, 0x90, + 0x0c, 0x3a, 0x4e, 0x52, 0xf7, 0xdb, 0x02, 0x46, 0x87, 0xdf, 0x02, 0xd0, 0x35, 0x58, 0xd8, 0xb6, + 0x5d, 0xd7, 0xde, 0x37, 0xac, 0x3a, 0xb7, 0x13, 0x92, 0xe4, 0x39, 0xc9, 0xff, 0xda, 0xad, 0xca, + 0x82, 0xda, 0x0f, 0x80, 0xfb, 0xeb, 0xc9, 0xfb, 0xb0, 0xb0, 0xc1, 0x7a, 0x0a, 0xb5, 0x7d, 0x57, + 0x27, 0x71, 0x41, 0xa0, 0x0a, 0x14, 0xf6, 0x88, 0xbb, 0x1d, 0x24, 0x75, 0x51, 0x2d, 0xb2, 0x72, + 0xf8, 0x98, 0x09, 0x70, 0x20, 0x67, 0x9e, 0x58, 0xb1, 0xe6, 0x4d, 0xbc, 0x46, 0x4b, 0x63, 0x1c, + 0xca, 0x3d, 0xd9, 0x48, 0x1e, 0xe1, 0x6e, 0xac, 0xdc, 0xca, 0xc1, 0x62, 0x4a, 0xfd, 0xa1, 0x9b, + 0x30, 0x41, 0xc5, 0xdf, 0xa2, 0xa6, 0x8e, 0x65, 0xbd, 0x85, 0xd0, 0x8d, 0xbb, 0x7f, 0x48, 0x86, + 0x23, 0x2a, 0x64, 0xc3, 0xb4, 0x2b, 0xae, 0xc0, 0x6d, 0x8a, 0x29, 0x70, 0x26, 0x8b, 0xbb, 0x37, + 0x3a, 0xf1, 0x63, 0xe3, 0x4e, 0x42, 0x9c, 0xe4, 0x47, 0x0f, 0x60, 0xae, 0xc3, 0xed, 0xc0, 0xe6, + 0x28, 0xb7, 0x79, 0x2e, 0xcb, 0x66, 0xdf, 0x47, 0x51, 0x4b, 0xc2, 0xec, 0xdc, 0x46, 0x17, 0x2d, + 0xee, 0x31, 0x24, 0xff, 0x9a, 0x83, 0x01, 0x83, 0xe1, 0x05, 0x2c, 0x79, 0x77, 0x13, 0x4b, 0xde, + 0xbb, 0xcf, 0x3e, 0xf1, 0x52, 0x97, 0xbe, 0x46, 0xd7, 0xd2, 0xf7, 0xfe, 0x73, 0xd8, 0x18, 0xbc, + 0x04, 0xfe, 0x95, 0x83, 0x97, 0xd2, 0x95, 0xe3, 0xa5, 0xf0, 0x6a, 0xa2, 0xc5, 0x9e, 0xef, 0x6a, + 0xb1, 0xc7, 
0x86, 0xa0, 0xf8, 0x6f, 0x49, 0xec, 0x5a, 0x12, 0x7f, 0x93, 0xa0, 0x9c, 0x1e, 0xb7, + 0x17, 0xb0, 0x34, 0x7e, 0x96, 0x5c, 0x1a, 0xdf, 0x7a, 0xf6, 0x24, 0x4b, 0x59, 0x22, 0x2f, 0x0f, + 0xca, 0xad, 0x68, 0xdd, 0x1b, 0x62, 0xe4, 0xff, 0x90, 0x1b, 0x14, 0x2a, 0xbe, 0x9d, 0x66, 0xfc, + 0x6a, 0x49, 0x68, 0x5f, 0xb2, 0xd8, 0xe8, 0x31, 0xd9, 0xf4, 0x08, 0x12, 0xb2, 0x01, 0xe3, 0xcd, + 0x60, 0x56, 0x8b, 0xa2, 0x5e, 0x19, 0x6a, 0x44, 0x0e, 0x1a, 0xed, 0xc1, 0x5a, 0x20, 0x60, 0x38, + 0xa4, 0x47, 0x35, 0x18, 0x23, 0xfc, 0xa7, 0xfa, 0xb0, 0x95, 0x9d, 0xf5, 0xc3, 0x5e, 0x05, 0x96, + 0x85, 0x01, 0x0a, 0x0b, 0x6e, 0xf9, 0x5b, 0x09, 0x96, 0xb3, 0x5a, 0x02, 0xda, 0xef, 0xb3, 0xe2, + 0x3d, 0xc7, 0xfa, 0x3e, 0xfc, 0xca, 0xf7, 0xa3, 0x04, 0x87, 0xfb, 0x6d, 0x52, 0xac, 0xc8, 0xd8, + 0xfa, 0x14, 0xed, 0x3e, 0x51, 0x91, 0x5d, 0xe7, 0x52, 0x2c, 0x4e, 0xd1, 0x09, 0x98, 0x68, 0x68, + 0x56, 0x6d, 0xcb, 0xf8, 0x22, 0xdc, 0xea, 0xa3, 0x34, 0xff, 0x50, 0xc8, 0x71, 0x84, 0x40, 0x17, + 0x61, 0x8e, 0xeb, 0xad, 0x11, 0xab, 0xee, 0x35, 0xf8, 0x8b, 0x88, 0xd5, 0x24, 0x9a, 0x3a, 0xd7, + 0xbb, 0xce, 0x71, 0x8f, 0x86, 0xfc, 0xb7, 0x04, 0xe8, 0x59, 0xb6, 0x89, 0xe3, 0x50, 0xd4, 0x1c, + 0x83, 0xaf, 0xb8, 0x41, 0xa1, 0x15, 0xd5, 0xe9, 0x76, 0xab, 0x52, 0x5c, 0xd9, 0xbc, 0x12, 0x08, + 0x71, 0x7c, 0xce, 0xc0, 0xe1, 0xa0, 0x0d, 0x06, 0xaa, 0x00, 0x87, 0x86, 0x29, 0x8e, 0xcf, 0xd1, + 0x05, 0x98, 0xd2, 0x9b, 0x3e, 0xf5, 0x88, 0xbb, 0xa5, 0xdb, 0x0e, 0xe1, 0x8d, 0x69, 0x42, 0x3d, + 0x2c, 0x7c, 0x9a, 0x5a, 0xed, 0x38, 0xc3, 0x09, 0x24, 0x52, 0x00, 0x58, 0x59, 0x51, 0x47, 0x63, + 0x76, 0x0a, 0xdc, 0xce, 0x0c, 0x7b, 0xb0, 0x8d, 0x48, 0x8a, 0x3b, 0x10, 0xf2, 0x3d, 0x58, 0xd8, + 0x22, 0xee, 0x9e, 0xa1, 0x93, 0x15, 0x5d, 0xb7, 0x7d, 0xcb, 0x0b, 0x97, 0xf5, 0x2a, 0x14, 0x23, + 0x98, 0xa8, 0xbc, 0x43, 0xc2, 0x7e, 0x31, 0xe2, 0xc2, 0x31, 0x26, 0x2a, 0xf5, 0x5c, 0x6a, 0xa9, + 0xff, 0x92, 0x83, 0xf1, 0x98, 0x3e, 0xbf, 0x6b, 0x58, 0x35, 0xc1, 0x7c, 0x24, 0x44, 0x5f, 0x35, + 0xac, 0xda, 0xd3, 0x56, 0x65, 0x52, 0xc0, 0xd8, 0x27, 0xe6, 0x40, 0x74, 0x05, 0xf2, 0x3e, 0x25, + 0xae, 0x28, 0xe2, 0xe3, 0x59, 0xc9, 0x7c, 0x93, 0x12, 0x37, 0xdc, 0xaf, 0x26, 0x18, 0x33, 0x13, + 0x60, 0x4e, 0x81, 0xd6, 0xa1, 0x50, 0x67, 0x8f, 0x22, 0xea, 0xf4, 0x44, 0x16, 0x57, 0xe7, 0x8f, + 0x98, 0x20, 0x0d, 0xb8, 0x04, 0x07, 0x2c, 0xe8, 0x3e, 0xcc, 0xd0, 0x44, 0x08, 0xf9, 0x73, 0x0d, + 0xb1, 0x2f, 0xf5, 0x0d, 0xbc, 0x8a, 0xda, 0xad, 0xca, 0x4c, 0xf2, 0x08, 0x77, 0x19, 0x90, 0xab, + 0x30, 0xd9, 0xe1, 0x60, 0x76, 0x97, 0x55, 0x2f, 0x3e, 0x7c, 0x52, 0x1e, 0x79, 0xf4, 0xa4, 0x3c, + 0xf2, 0xf8, 0x49, 0x79, 0xe4, 0xcb, 0x76, 0x59, 0x7a, 0xd8, 0x2e, 0x4b, 0x8f, 0xda, 0x65, 0xe9, + 0x71, 0xbb, 0x2c, 0xfd, 0xde, 0x2e, 0x4b, 0x5f, 0xff, 0x51, 0x1e, 0xb9, 0x5d, 0x1e, 0xfc, 0xbf, + 0xd8, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, 0x42, 0x4c, 0x0f, 0xac, 0x15, 0x00, 0x00, +} + +func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { @@ -1154,6 +1222,16 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i _ = i var l int _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } { size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1477,6 +1555,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Limited != nil { { size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) @@ -1769,6 +1859,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + func (m *FlowDistinguisherMethod) Size() (n int) { if m == nil { return 0 @@ -1903,6 +2008,12 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } return n } @@ -2028,6 +2139,10 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) { l = m.Limited.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2145,6 +2260,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} func (this *FlowDistinguisherMethod) String() string { if this == nil { return "nil" @@ -2258,6 +2384,8 @@ func (this *LimitedPriorityLevelConfiguration) String() string { s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, `}`, }, "") return s @@ -2359,6 +2487,7 @@ func (this *PriorityLevelConfigurationSpec) String() string { s := 
strings.Join([]string{`&PriorityLevelConfigurationSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, `}`, }, "") return s @@ -2446,6 +2575,96 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3542,6 +3761,46 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4485,6 +4744,42 
@@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index b0d9525d6..61ed3833a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -26,7 +26,41 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/flowcontrol/v1beta1"; + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} // FlowDistinguisherMethod specifies the method of a flow distinguisher. message FlowDistinguisherMethod { @@ -42,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -67,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -81,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -158,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces @@ -176,6 +210,35 @@ message LimitedPriorityLevelConfiguration { // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. 
+ // +optional + optional int32 borrowingLimitPercent = 4; } // NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the @@ -232,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -257,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -271,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; @@ -303,6 +366,14 @@ message PriorityLevelConfigurationSpec { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; } // PriorityLevelConfigurationStatus represents the current state of a "request-priority". diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index d6744f6f6..abc3e4200 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -77,7 +77,9 @@ const ( // is a boolean false or has an invalid boolean representation // (if the cluster operator sets it to 'false' it will be stomped) // - any changes to the spec made by the cluster operator will be - // stomped. + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". // // The kube-apiserver will apply updates on the suggested configuration if: // - the cluster operator has enabled auto-update by setting the annotation @@ -105,7 +107,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchema +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. 
Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -127,7 +129,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,FlowSchemaList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -382,7 +384,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfiguration +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -403,7 +405,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.20 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta2,PriorityLevelConfigurationList +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -435,6 +437,14 @@ type PriorityLevelConfigurationSpec struct { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` } // PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level @@ -456,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces @@ -474,6 +484,72 @@ type LimitedPriorityLevelConfiguration struct { // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. 
+ // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. } // LimitResponse defines how to handle requests that can not be executed right now. 
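(Reviewer note, not part of the vendored patch: the doc comments added to types.go above define how a level's limits are derived from the new fields. The short, self-contained Go sketch below only restates those formulas — NominalCL(i) = ceil(ServerCL * NCS(i) / sum_ncs), LendableCL(i) = round(NominalCL(i) * lendablePercent(i)/100.0), BorrowingCL(i) = round(NominalCL(i) * borrowingLimitPercent(i)/100.0) — with hypothetical helper names and example numbers, to make the arithmetic concrete; it is not code from k8s.io/api.)

package main

import (
	"fmt"
	"math"
)

// nominalCL computes NominalCL(i) = ceil(ServerCL * NCS(i) / sum_ncs).
func nominalCL(serverCL, ncs, sumNCS int32) int32 {
	return int32(math.Ceil(float64(serverCL) * float64(ncs) / float64(sumNCS)))
}

// lendableCL computes LendableCL(i) = round(NominalCL(i) * lendablePercent(i)/100.0).
func lendableCL(nominal, lendablePercent int32) int32 {
	return int32(math.Round(float64(nominal) * float64(lendablePercent) / 100.0))
}

// borrowingCL computes BorrowingCL(i) = round(NominalCL(i) * borrowingLimitPercent(i)/100.0).
// A nil borrowingLimitPercent means the borrowing limit is effectively infinite,
// so the second return value reports whether an explicit limit exists.
func borrowingCL(nominal int32, borrowingLimitPercent *int32) (int32, bool) {
	if borrowingLimitPercent == nil {
		return 0, false
	}
	return int32(math.Round(float64(nominal) * float64(*borrowingLimitPercent) / 100.0)), true
}

func main() {
	// Hypothetical example: a server concurrency limit of 600 shared by levels
	// whose shares sum to 100, with this level holding 30 shares.
	nominal := nominalCL(600, 30, 100)
	fmt.Println("NominalCL:", nominal) // 180

	// lendablePercent = 50: half of the nominal seats may be borrowed by others.
	fmt.Println("LendableCL:", lendableCL(nominal, 50)) // 90

	// borrowingLimitPercent = 200: this level may borrow up to twice its own
	// nominal limit, illustrating that values above 100 are allowed.
	limit := int32(200)
	if cl, ok := borrowingCL(nominal, &limit); ok {
		fmt.Println("BorrowingCL:", cl) // 360
	}
}
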
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index 9f8eacead..d69bdac62 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + var map_FlowDistinguisherMethod = map[string]string{ "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", @@ -112,8 +122,10 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. 
This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", } func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { @@ -188,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{ "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. 
If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", } func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go index b7b84634a..965d5e55a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,32 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. +func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { *out = *in @@ -212,6 +238,16 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } return } @@ -390,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu *out = new(LimitedPriorityLevelConfiguration) (*in).DeepCopyInto(*out) } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go index ed1e16c26..59b36b3bf 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.prerelease-lifecycle.go @@ -40,7 +40,7 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -64,7 +64,7 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -88,7 +88,7 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. @@ -112,7 +112,7 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go index fe550271b..f646446df 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +// source: k8s.io/api/flowcontrol/v1beta2/generated.proto package v1beta2 @@ -43,10 +43,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_2e620af2eea53237, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{0} + return fileDescriptor_2e620af2eea53237, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{1} + return fileDescriptor_2e620af2eea53237, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{2} + return fileDescriptor_2e620af2eea53237, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{3} + return fileDescriptor_2e620af2eea53237, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{4} + return fileDescriptor_2e620af2eea53237, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { 
*m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{5} + return fileDescriptor_2e620af2eea53237, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{6} + return fileDescriptor_2e620af2eea53237, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{7} + return fileDescriptor_2e620af2eea53237, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{8} + return fileDescriptor_2e620af2eea53237, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{9} + return fileDescriptor_2e620af2eea53237, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{10} + return fileDescriptor_2e620af2eea53237, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{11} + return fileDescriptor_2e620af2eea53237, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{12} + return fileDescriptor_2e620af2eea53237, []int{13} } func (m 
*PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{13} + return fileDescriptor_2e620af2eea53237, []int{14} } func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{14} + return fileDescriptor_2e620af2eea53237, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{15} + return fileDescriptor_2e620af2eea53237, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{16} + return fileDescriptor_2e620af2eea53237, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{17} + return fileDescriptor_2e620af2eea53237, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{18} + return fileDescriptor_2e620af2eea53237, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{19} + return fileDescriptor_2e620af2eea53237, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{20} + return fileDescriptor_2e620af2eea53237, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_ed300aa8e672704e, []int{21} + return fileDescriptor_2e620af2eea53237, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration") proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowDistinguisherMethod") proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchema") proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaCondition") @@ -685,105 +714,145 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto", fileDescriptor_ed300aa8e672704e) -} - -var fileDescriptor_ed300aa8e672704e = []byte{ - // 1495 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcb, 0x73, 0xdb, 0x44, - 0x18, 0x8f, 0x1c, 0x3b, 0x89, 0xbf, 0x3c, 0xbb, 0x69, 0x27, 0x9e, 0x74, 0xc6, 0x4e, 0xc5, 0x0c, - 0x05, 0xda, 0xca, 0x6d, 0x69, 0x69, 0x81, 0xe1, 0x11, 0xa5, 0x50, 0x4a, 0x93, 0x34, 0xdd, 0xb4, - 0xc0, 0x94, 0xce, 0x50, 0x59, 0xde, 0xd8, 0x6a, 0x6c, 0x49, 0xd5, 0xae, 0x9c, 0x09, 0xbd, 0x30, - 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x3b, 0xff, 0x00, 0x47, 0x3a, 0x9c, 0x7a, 0xec, 0xc9, 0x50, - 0x73, 0xe2, 0xc0, 0x1d, 0x7a, 0x62, 0x76, 0xb5, 0x92, 0x2c, 0xbf, 0xe4, 0x69, 0x67, 0x7a, 0xe2, - 0x66, 0x7d, 0x8f, 0xdf, 0xf7, 0xd8, 0xdf, 0x7e, 0xfb, 0x19, 0xae, 0xee, 0x5f, 0xa6, 0x9a, 0xe5, - 0x94, 0xf7, 0xfd, 0x0a, 0xf1, 0x6c, 0xc2, 0x08, 0x2d, 0xb7, 0x88, 0x5d, 0x75, 0xbc, 0xb2, 0x54, - 0x18, 0xae, 0x55, 0xde, 0x6b, 0x38, 0x07, 0xa6, 0x63, 0x33, 0xcf, 0x69, 0x94, 0x5b, 0xe7, 0x2a, - 0x84, 0x19, 0xe7, 0xcb, 0x35, 0x62, 0x13, 0xcf, 0x60, 0xa4, 0xaa, 0xb9, 0x9e, 0xc3, 0x1c, 0x54, - 0x0c, 0xec, 0x35, 0xc3, 0xb5, 0xb4, 0x2e, 0x7b, 0x4d, 0xda, 0xaf, 0x9e, 0xa9, 0x59, 0xac, 0xee, - 0x57, 0x34, 0xd3, 0x69, 0x96, 0x6b, 0x4e, 0xcd, 0x29, 0x0b, 0xb7, 0x8a, 0xbf, 0x27, 0xbe, 0xc4, - 0x87, 0xf8, 0x15, 0xc0, 0xad, 0x5e, 0x88, 0xc3, 0x37, 0x0d, 0xb3, 0x6e, 0xd9, 0xc4, 0x3b, 0x2c, - 0xbb, 0xfb, 0x35, 0x2e, 0xa0, 0xe5, 0x26, 0x61, 0x46, 0xb9, 0x75, 0xae, 0x37, 0x89, 0xd5, 0xf2, - 0x30, 0x2f, 0xcf, 0xb7, 0x99, 0xd5, 0x24, 0x7d, 0x0e, 0x6f, 0xa5, 0x39, 0x50, 0xb3, 0x4e, 0x9a, - 0x46, 0xaf, 0x9f, 0x7a, 0x07, 0x56, 0x3e, 0x6e, 0x38, 0x07, 0x57, 0x2c, 0xca, 0x2c, 0xbb, 0xe6, - 0x5b, 0xb4, 0x4e, 0xbc, 0x2d, 0xc2, 0xea, 0x4e, 0x15, 0x7d, 0x00, 0x59, 0x76, 0xe8, 0x92, 0x82, - 0xb2, 0xa6, 0xbc, 0x96, 0xd7, 0x4f, 0x3d, 0x6a, 0x97, 0x26, 0x3a, 0xed, 0x52, 0xf6, 0xd6, 0xa1, - 0x4b, 0x9e, 0xb5, 0x4b, 0xc7, 0x87, 0xb8, 0x71, 0x35, 0x16, 0x8e, 0xea, 0xf7, 0x19, 0x00, 0x6e, - 0xb5, 0x2b, 0x42, 0xa3, 0x7b, 
0x30, 0xc3, 0xcb, 0xad, 0x1a, 0xcc, 0x10, 0x98, 0xb3, 0xe7, 0xcf, - 0x6a, 0x71, 0xaf, 0xa3, 0xac, 0x35, 0x77, 0xbf, 0xc6, 0x05, 0x54, 0xe3, 0xd6, 0x5a, 0xeb, 0x9c, - 0x76, 0xa3, 0x72, 0x9f, 0x98, 0x6c, 0x8b, 0x30, 0x43, 0x47, 0x32, 0x0b, 0x88, 0x65, 0x38, 0x42, - 0x45, 0x3b, 0x90, 0xa5, 0x2e, 0x31, 0x0b, 0x19, 0x81, 0xae, 0x69, 0xa3, 0x4f, 0x52, 0x8b, 0x73, - 0xdb, 0x75, 0x89, 0xa9, 0xcf, 0x85, 0x15, 0xf2, 0x2f, 0x2c, 0x90, 0xd0, 0x17, 0x30, 0x45, 0x99, - 0xc1, 0x7c, 0x5a, 0x98, 0xec, 0xcb, 0x38, 0x0d, 0x53, 0xf8, 0xe9, 0x0b, 0x12, 0x75, 0x2a, 0xf8, - 0xc6, 0x12, 0x4f, 0x7d, 0x92, 0x81, 0xe5, 0xd8, 0x78, 0xc3, 0xb1, 0xab, 0x16, 0xb3, 0x1c, 0x1b, - 0xbd, 0x9b, 0xe8, 0xfa, 0xc9, 0x9e, 0xae, 0xaf, 0x0c, 0x70, 0x89, 0x3b, 0x8e, 0xde, 0x8e, 0xd2, - 0xcd, 0x08, 0xf7, 0x13, 0xc9, 0xe0, 0xcf, 0xda, 0xa5, 0xc5, 0xc8, 0x2d, 0x99, 0x0f, 0x6a, 0x01, - 0x6a, 0x18, 0x94, 0xdd, 0xf2, 0x0c, 0x9b, 0x06, 0xb0, 0x56, 0x93, 0xc8, 0xaa, 0xdf, 0x18, 0xef, - 0x9c, 0xb8, 0x87, 0xbe, 0x2a, 0x43, 0xa2, 0xcd, 0x3e, 0x34, 0x3c, 0x20, 0x02, 0x7a, 0x15, 0xa6, - 0x3c, 0x62, 0x50, 0xc7, 0x2e, 0x64, 0x45, 0xca, 0x51, 0xbf, 0xb0, 0x90, 0x62, 0xa9, 0x45, 0xaf, - 0xc3, 0x74, 0x93, 0x50, 0x6a, 0xd4, 0x48, 0x21, 0x27, 0x0c, 0x17, 0xa5, 0xe1, 0xf4, 0x56, 0x20, - 0xc6, 0xa1, 0x5e, 0xfd, 0x45, 0x81, 0x85, 0xb8, 0x4f, 0x9b, 0x16, 0x65, 0xe8, 0x6e, 0x1f, 0xf7, - 0xb4, 0xf1, 0x6a, 0xe2, 0xde, 0x82, 0x79, 0x4b, 0x32, 0xdc, 0x4c, 0x28, 0xe9, 0xe2, 0xdd, 0x0d, - 0xc8, 0x59, 0x8c, 0x34, 0x79, 0xd7, 0x27, 0x7b, 0xda, 0x95, 0x42, 0x12, 0x7d, 0x5e, 0xc2, 0xe6, - 0xae, 0x71, 0x00, 0x1c, 0xe0, 0xa8, 0x7f, 0x4d, 0x76, 0x57, 0xc0, 0xf9, 0x88, 0x7e, 0x52, 0x60, - 0xd5, 0xf5, 0x2c, 0xc7, 0xb3, 0xd8, 0xe1, 0x26, 0x69, 0x91, 0xc6, 0x86, 0x63, 0xef, 0x59, 0x35, - 0xdf, 0x33, 0x78, 0x2b, 0x65, 0x51, 0x1b, 0x69, 0x91, 0x77, 0x86, 0x22, 0x60, 0xb2, 0x47, 0x3c, - 0x62, 0x9b, 0x44, 0x57, 0x65, 0x4a, 0xab, 0x23, 0x8c, 0x47, 0xa4, 0x82, 0x3e, 0x05, 0xd4, 0x34, - 0x18, 0xef, 0x68, 0x6d, 0xc7, 0x23, 0x26, 0xa9, 0x72, 0x54, 0x41, 0xc8, 0x5c, 0xcc, 0x8e, 0xad, - 0x3e, 0x0b, 0x3c, 0xc0, 0x0b, 0x7d, 0xab, 0xc0, 0x72, 0xb5, 0x7f, 0xc8, 0x48, 0x5e, 0x5e, 0x1a, - 0xa7, 0xd1, 0x03, 0x66, 0x94, 0xbe, 0xd2, 0x69, 0x97, 0x96, 0x07, 0x28, 0xf0, 0xa0, 0x60, 0xe8, - 0x2e, 0xe4, 0x3c, 0xbf, 0x41, 0x68, 0x21, 0x2b, 0x8e, 0x37, 0x35, 0xea, 0x8e, 0xd3, 0xb0, 0xcc, - 0x43, 0xcc, 0x5d, 0x3e, 0xb7, 0x58, 0x7d, 0xd7, 0x17, 0xb3, 0x8a, 0xc6, 0x67, 0x2d, 0x54, 0x38, - 0x00, 0x55, 0x1f, 0xc2, 0x52, 0xef, 0xd0, 0x40, 0x35, 0x00, 0x33, 0xbc, 0xa7, 0xb4, 0xa0, 0x88, - 0xb0, 0x6f, 0x8e, 0xcf, 0xaa, 0xe8, 0x8e, 0xc7, 0xf3, 0x32, 0x12, 0x51, 0xdc, 0x05, 0xad, 0x9e, - 0x85, 0xb9, 0xab, 0x9e, 0xe3, 0xbb, 0x32, 0x47, 0xb4, 0x06, 0x59, 0xdb, 0x68, 0x86, 0xd3, 0x27, - 0x9a, 0x88, 0xdb, 0x46, 0x93, 0x60, 0xa1, 0x51, 0x7f, 0x54, 0x60, 0x7e, 0xd3, 0x6a, 0x5a, 0x0c, - 0x13, 0xea, 0x3a, 0x36, 0x25, 0xe8, 0x62, 0x62, 0x62, 0x9d, 0xe8, 0x99, 0x58, 0x47, 0x12, 0xc6, - 0x5d, 0xb3, 0xea, 0x4b, 0x98, 0x7e, 0xe0, 0x13, 0xdf, 0xb2, 0x6b, 0x72, 0x5e, 0x5f, 0x48, 0x2b, - 0xf0, 0x66, 0x60, 0x9e, 0x60, 0x9b, 0x3e, 0xcb, 0x47, 0x80, 0xd4, 0xe0, 0x10, 0x51, 0xfd, 0x5b, - 0x81, 0x13, 0x22, 0x30, 0xa9, 0x0e, 0x67, 0x31, 0xba, 0x0b, 0x05, 0x83, 0x52, 0xdf, 0x23, 0xd5, - 0x0d, 0xc7, 0x36, 0x7d, 0x8f, 0xf3, 0xff, 0x70, 0xb7, 0x6e, 0x78, 0x84, 0x8a, 0x6a, 0x72, 0xfa, - 0x9a, 0xac, 0xa6, 0xb0, 0x3e, 0xc4, 0x0e, 0x0f, 0x45, 0x40, 0xf7, 0x61, 0xbe, 0xd1, 0x5d, 0xbb, - 0x2c, 0xf3, 0x4c, 0x5a, 0x99, 0x89, 0x86, 0xe9, 0xc7, 0x64, 0x06, 0xc9, 0xa6, 0xe3, 0x24, 0xb4, - 0x7a, 0x00, 0xc7, 0xb6, 0xf9, 0x1d, 0xa6, 0x8e, 0xef, 
0x99, 0x24, 0x26, 0x20, 0x2a, 0x41, 0xae, - 0x45, 0xbc, 0x4a, 0x40, 0xa2, 0xbc, 0x9e, 0xe7, 0xf4, 0xfb, 0x8c, 0x0b, 0x70, 0x20, 0x47, 0xef, - 0xc1, 0xa2, 0x1d, 0x7b, 0xde, 0xc6, 0x9b, 0xb4, 0x30, 0x25, 0x4c, 0x97, 0x3b, 0xed, 0xd2, 0xe2, - 0x76, 0x52, 0x85, 0x7b, 0x6d, 0xd5, 0x76, 0x06, 0x56, 0x86, 0xf0, 0x1d, 0xdd, 0x86, 0x19, 0x2a, - 0x7f, 0x4b, 0x0e, 0x9f, 0x4c, 0xab, 0x5d, 0xfa, 0xc6, 0xd3, 0x36, 0x04, 0xc3, 0x11, 0x14, 0x72, - 0x60, 0xde, 0x93, 0x29, 0x88, 0x98, 0x72, 0xea, 0x9e, 0x4f, 0xc3, 0xee, 0xef, 0x4e, 0xdc, 0x5c, - 0xdc, 0x0d, 0x88, 0x93, 0xf8, 0xe8, 0x21, 0x2c, 0x75, 0x95, 0x1d, 0xc4, 0x9c, 0x14, 0x31, 0x2f, - 0xa6, 0xc5, 0x1c, 0x78, 0x28, 0x7a, 0x41, 0x86, 0x5d, 0xda, 0xee, 0x81, 0xc5, 0x7d, 0x81, 0xd4, - 0xdf, 0x32, 0x30, 0x62, 0x10, 0xbf, 0x84, 0xa5, 0xea, 0x5e, 0x62, 0xa9, 0x7a, 0xff, 0xf9, 0x5f, - 0x98, 0xa1, 0x4b, 0x56, 0xbd, 0x67, 0xc9, 0xfa, 0xf0, 0x05, 0x62, 0x8c, 0x5e, 0xba, 0xfe, 0xc9, - 0xc0, 0x2b, 0xc3, 0x9d, 0xe3, 0x25, 0xec, 0x7a, 0x62, 0xa4, 0x5d, 0xea, 0x19, 0x69, 0x27, 0xc7, - 0x80, 0xf8, 0x7f, 0x29, 0xeb, 0x59, 0xca, 0x7e, 0x57, 0xa0, 0x38, 0xbc, 0x6f, 0x2f, 0x61, 0x49, - 0xfb, 0x2a, 0xb9, 0xa4, 0xbd, 0xf3, 0xfc, 0x24, 0x1b, 0xb2, 0xb4, 0x5d, 0x1d, 0xc5, 0xad, 0x68, - 0xbd, 0x1a, 0xe3, 0x89, 0xfd, 0x75, 0x64, 0xab, 0xc4, 0x36, 0x98, 0xf2, 0x2f, 0x21, 0xe1, 0xfd, - 0x91, 0x6d, 0x54, 0x1a, 0xa4, 0x49, 0x6c, 0x26, 0x09, 0x59, 0x87, 0xe9, 0x46, 0xf0, 0x36, 0xca, - 0x4b, 0xbd, 0x3e, 0xd6, 0x93, 0x34, 0xea, 0x29, 0x0d, 0x9e, 0x61, 0x69, 0x86, 0x43, 0x78, 0xf5, - 0x07, 0x05, 0xd6, 0xd2, 0x2e, 0x2b, 0x3a, 0x18, 0xb0, 0xec, 0xbc, 0xc0, 0x22, 0x3b, 0xfe, 0xf2, - 0xf3, 0xb3, 0x02, 0x47, 0x07, 0xed, 0x14, 0x9c, 0xfe, 0x7c, 0x91, 0x88, 0xb6, 0x80, 0x88, 0xfe, - 0x37, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x0d, 0x33, 0x75, 0xc3, 0xae, 0xee, 0x5a, 0x5f, 0x87, 0xfb, - 0x6d, 0x44, 0xc0, 0x4f, 0xa4, 0x1c, 0x47, 0x16, 0xe8, 0x0a, 0x2c, 0x09, 0xbf, 0x4d, 0x62, 0xd7, - 0x58, 0x5d, 0xf4, 0x4a, 0x5c, 0xe5, 0x5c, 0xfc, 0x1e, 0xdc, 0xec, 0xd1, 0xe3, 0x3e, 0x0f, 0xf5, - 0x5f, 0x05, 0xd0, 0xf3, 0xbc, 0xf3, 0xa7, 0x20, 0x6f, 0xb8, 0x96, 0x58, 0xf6, 0x82, 0x2b, 0x90, - 0xd7, 0xe7, 0x3b, 0xed, 0x52, 0x7e, 0x7d, 0xe7, 0x5a, 0x20, 0xc4, 0xb1, 0x9e, 0x1b, 0x87, 0x4f, - 0x60, 0xf0, 0xd4, 0x49, 0xe3, 0x30, 0x30, 0xc5, 0xb1, 0x1e, 0x5d, 0x86, 0x39, 0xb3, 0xe1, 0x53, - 0x46, 0xbc, 0x5d, 0xd3, 0x71, 0x89, 0x18, 0x19, 0x33, 0xfa, 0x51, 0x59, 0xd3, 0xdc, 0x46, 0x97, - 0x0e, 0x27, 0x2c, 0x91, 0x06, 0xc0, 0x09, 0x4f, 0x5d, 0x83, 0xc7, 0xc9, 0x89, 0x38, 0x0b, 0xfc, - 0xc0, 0xb6, 0x23, 0x29, 0xee, 0xb2, 0x50, 0xef, 0xc3, 0xb1, 0x5d, 0xe2, 0xb5, 0x2c, 0x93, 0xac, - 0x9b, 0xa6, 0xe3, 0xdb, 0x2c, 0x5c, 0x5b, 0xcb, 0x90, 0x8f, 0xcc, 0xe4, 0x9d, 0x38, 0x22, 0xe3, - 0xe7, 0x23, 0x2c, 0x1c, 0xdb, 0x44, 0x97, 0x30, 0x33, 0xfc, 0x12, 0x66, 0x60, 0x3a, 0x86, 0xcf, - 0xee, 0x5b, 0x76, 0x55, 0x22, 0x1f, 0x0f, 0xad, 0xaf, 0x5b, 0x76, 0xf5, 0x59, 0xbb, 0x34, 0x2b, - 0xcd, 0xf8, 0x27, 0x16, 0x86, 0xe8, 0x1a, 0x64, 0x7d, 0x4a, 0x3c, 0x79, 0xbd, 0x4e, 0xa5, 0x91, - 0xf9, 0x36, 0x25, 0x5e, 0xb8, 0xf9, 0xcc, 0x70, 0x64, 0x2e, 0xc0, 0x02, 0x02, 0x6d, 0x41, 0xae, - 0xc6, 0x0f, 0x45, 0x4e, 0xfd, 0xd3, 0x69, 0x58, 0xdd, 0xeb, 0x7c, 0x40, 0x03, 0x21, 0xc1, 0x01, - 0x0a, 0x7a, 0x00, 0x0b, 0x34, 0xd1, 0x42, 0x71, 0x5c, 0x63, 0x6c, 0x32, 0x03, 0x1b, 0xaf, 0xa3, - 0x4e, 0xbb, 0xb4, 0x90, 0x54, 0xe1, 0x9e, 0x00, 0x6a, 0x19, 0x66, 0xbb, 0x0a, 0x4c, 0x9f, 0x7f, - 0xfa, 0x99, 0x47, 0x4f, 0x8b, 0x13, 0x8f, 0x9f, 0x16, 0x27, 0x9e, 0x3c, 0x2d, 0x4e, 0x7c, 0xd3, - 0x29, 0x2a, 0x8f, 0x3a, 0x45, 0xe5, 0x71, 0xa7, 0xa8, 0x3c, 0xe9, 0x14, 0x95, 
0x3f, 0x3a, 0x45, - 0xe5, 0xbb, 0x3f, 0x8b, 0x13, 0x77, 0xa6, 0x65, 0x66, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc1, - 0xb4, 0x84, 0x2d, 0xfa, 0x13, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/flowcontrol/v1beta2/generated.proto", fileDescriptor_2e620af2eea53237) +} + +var fileDescriptor_2e620af2eea53237 = []byte{ + // 1602 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcd, 0x73, 0xdb, 0xd4, + 0x16, 0x8f, 0x1c, 0x3b, 0x89, 0x4f, 0x3e, 0x7b, 0xd3, 0x4c, 0xfc, 0xd2, 0x37, 0x76, 0xaa, 0x37, + 0xf3, 0xfa, 0x1e, 0x6d, 0xe5, 0x36, 0xb4, 0xb4, 0xc0, 0xf0, 0x11, 0xa5, 0xa5, 0x94, 0x26, 0x69, + 0x7a, 0xd3, 0x42, 0xa7, 0x74, 0x86, 0x2a, 0xf2, 0x8d, 0xad, 0xc6, 0xfa, 0xa8, 0xae, 0x94, 0x10, + 0xba, 0x61, 0xf8, 0x0b, 0x58, 0xc3, 0x92, 0x05, 0x2b, 0x36, 0x6c, 0x59, 0xb0, 0xa4, 0xc3, 0xaa, + 0xcb, 0xae, 0x0c, 0x35, 0x2b, 0xfe, 0x03, 0xe8, 0x0c, 0x33, 0xcc, 0xbd, 0xba, 0x92, 0x2c, 0xdb, + 0xb2, 0x3c, 0xed, 0x4c, 0x57, 0xec, 0xa2, 0x73, 0x7f, 0xe7, 0x77, 0xee, 0x39, 0xf7, 0x7c, 0x39, + 0xa0, 0xec, 0x5d, 0xa4, 0x8a, 0x61, 0x57, 0x35, 0xc7, 0xa8, 0xee, 0x36, 0xed, 0x03, 0xdd, 0xb6, + 0x3c, 0xd7, 0x6e, 0x56, 0xf7, 0xcf, 0xee, 0x10, 0x4f, 0x5b, 0xa9, 0xd6, 0x89, 0x45, 0x5c, 0xcd, + 0x23, 0x35, 0xc5, 0x71, 0x6d, 0xcf, 0x46, 0xe5, 0x00, 0xaf, 0x68, 0x8e, 0xa1, 0x74, 0xe0, 0x15, + 0x81, 0x5f, 0x3a, 0x5d, 0x37, 0xbc, 0x86, 0xbf, 0xa3, 0xe8, 0xb6, 0x59, 0xad, 0xdb, 0x75, 0xbb, + 0xca, 0xd5, 0x76, 0xfc, 0x5d, 0xfe, 0xc5, 0x3f, 0xf8, 0x5f, 0x01, 0xdd, 0xd2, 0xb9, 0xd8, 0xbc, + 0xa9, 0xe9, 0x0d, 0xc3, 0x22, 0xee, 0x61, 0xd5, 0xd9, 0xab, 0x33, 0x01, 0xad, 0x9a, 0xc4, 0xd3, + 0xaa, 0xfb, 0x67, 0xbb, 0x2f, 0xb1, 0x54, 0x4d, 0xd3, 0x72, 0x7d, 0xcb, 0x33, 0x4c, 0xd2, 0xa3, + 0xf0, 0x5a, 0x96, 0x02, 0xd5, 0x1b, 0xc4, 0xd4, 0xba, 0xf5, 0xe4, 0x1f, 0x24, 0x58, 0xbe, 0xfc, + 0x29, 0x31, 0x1d, 0x6f, 0xcb, 0x35, 0x6c, 0xd7, 0xf0, 0x0e, 0xd7, 0xc9, 0x3e, 0x69, 0xae, 0xd9, + 0xd6, 0xae, 0x51, 0xf7, 0x5d, 0xcd, 0x33, 0x6c, 0x0b, 0xdd, 0x86, 0x92, 0x65, 0x9b, 0x86, 0xa5, + 0x31, 0xb9, 0xee, 0xbb, 0x2e, 0xb1, 0xf4, 0xc3, 0xed, 0x86, 0xe6, 0x12, 0x5a, 0x92, 0x96, 0xa5, + 0xff, 0x15, 0xd4, 0x7f, 0xb7, 0x5b, 0x95, 0xd2, 0x66, 0x0a, 0x06, 0xa7, 0x6a, 0xa3, 0xb7, 0x60, + 0xb6, 0x49, 0xac, 0x9a, 0xb6, 0xd3, 0x24, 0x5b, 0xc4, 0xd5, 0x89, 0xe5, 0x95, 0x72, 0x9c, 0x70, + 0xbe, 0xdd, 0xaa, 0xcc, 0xae, 0x27, 0x8f, 0x70, 0x37, 0x56, 0xbe, 0x03, 0x8b, 0xef, 0x35, 0xed, + 0x83, 0x4b, 0x06, 0xf5, 0x0c, 0xab, 0xee, 0x1b, 0xb4, 0x41, 0xdc, 0x0d, 0xe2, 0x35, 0xec, 0x1a, + 0x7a, 0x07, 0xf2, 0xde, 0xa1, 0x43, 0xf8, 0xfd, 0x8a, 0xea, 0xc9, 0x47, 0xad, 0xca, 0x48, 0xbb, + 0x55, 0xc9, 0xdf, 0x3c, 0x74, 0xc8, 0xb3, 0x56, 0xe5, 0x58, 0x8a, 0x1a, 0x3b, 0xc6, 0x5c, 0x51, + 0xfe, 0x2a, 0x07, 0xc0, 0x50, 0xdb, 0x3c, 0x70, 0xe8, 0x1e, 0x4c, 0xb0, 0xc7, 0xaa, 0x69, 0x9e, + 0xc6, 0x39, 0x27, 0x57, 0xce, 0x28, 0x71, 0xa6, 0x44, 0x31, 0x57, 0x9c, 0xbd, 0x3a, 0x13, 0x50, + 0x85, 0xa1, 0x95, 0xfd, 0xb3, 0xca, 0xf5, 0x9d, 0xfb, 0x44, 0xf7, 0x36, 0x88, 0xa7, 0xa9, 0x48, + 0xdc, 0x02, 0x62, 0x19, 0x8e, 0x58, 0xd1, 0x16, 0xe4, 0xa9, 0x43, 0x74, 0x1e, 0x80, 0xc9, 0x15, + 0x45, 0x19, 0x9c, 0x87, 0x4a, 0x7c, 0xb7, 0x6d, 0x87, 0xe8, 0xea, 0x54, 0xe8, 0x21, 0xfb, 0xc2, + 0x9c, 0x09, 0xdd, 0x86, 0x31, 0xea, 0x69, 0x9e, 0x4f, 0x4b, 0xa3, 0x3d, 0x37, 0xce, 0xe2, 0xe4, + 0x7a, 0xea, 0x8c, 0x60, 0x1d, 0x0b, 0xbe, 0xb1, 0xe0, 0x93, 0x9f, 0xe4, 0x60, 0x3e, 0x06, 0xaf, + 0xd9, 0x56, 0xcd, 0xe0, 0x99, 0xf2, 0x66, 0x22, 0xea, 0x27, 0xba, 0xa2, 0xbe, 0xd8, 0x47, 0x25, + 0x8e, 0x38, 0x7a, 0x3d, 0xba, 0x6e, 0x8e, 0xab, 
0x1f, 0x4f, 0x1a, 0x7f, 0xd6, 0xaa, 0xcc, 0x46, + 0x6a, 0xc9, 0xfb, 0xa0, 0x7d, 0x40, 0x4d, 0x8d, 0x7a, 0x37, 0x5d, 0xcd, 0xa2, 0x01, 0xad, 0x61, + 0x12, 0xe1, 0xf5, 0x2b, 0xc3, 0xbd, 0x13, 0xd3, 0x50, 0x97, 0x84, 0x49, 0xb4, 0xde, 0xc3, 0x86, + 0xfb, 0x58, 0x40, 0xff, 0x85, 0x31, 0x97, 0x68, 0xd4, 0xb6, 0x4a, 0x79, 0x7e, 0xe5, 0x28, 0x5e, + 0x98, 0x4b, 0xb1, 0x38, 0x45, 0xff, 0x87, 0x71, 0x93, 0x50, 0xaa, 0xd5, 0x49, 0xa9, 0xc0, 0x81, + 0xb3, 0x02, 0x38, 0xbe, 0x11, 0x88, 0x71, 0x78, 0x2e, 0xff, 0x28, 0xc1, 0x4c, 0x1c, 0xa7, 0x75, + 0x83, 0x7a, 0xe8, 0x6e, 0x4f, 0xee, 0x29, 0xc3, 0xf9, 0xc4, 0xb4, 0x79, 0xe6, 0xcd, 0x09, 0x73, + 0x13, 0xa1, 0xa4, 0x23, 0xef, 0xae, 0x43, 0xc1, 0xf0, 0x88, 0xc9, 0xa2, 0x3e, 0xda, 0x15, 0xae, + 0x8c, 0x24, 0x51, 0xa7, 0x05, 0x6d, 0xe1, 0x2a, 0x23, 0xc0, 0x01, 0x8f, 0xfc, 0xfb, 0x68, 0xa7, + 0x07, 0x2c, 0x1f, 0xd1, 0xb7, 0x12, 0x2c, 0x39, 0xa9, 0x0d, 0x46, 0x38, 0xb5, 0x96, 0x65, 0x39, + 0xbd, 0x45, 0x61, 0xb2, 0x4b, 0x58, 0x5f, 0x21, 0xaa, 0x2c, 0xae, 0xb4, 0x34, 0x00, 0x3c, 0xe0, + 0x2a, 0xe8, 0x03, 0x40, 0xa6, 0xe6, 0xb1, 0x88, 0xd6, 0xb7, 0x5c, 0xa2, 0x93, 0x1a, 0x63, 0x15, + 0x4d, 0x29, 0xca, 0x8e, 0x8d, 0x1e, 0x04, 0xee, 0xa3, 0x85, 0xbe, 0x90, 0x60, 0xbe, 0xd6, 0xdb, + 0x64, 0x44, 0x5e, 0x5e, 0x18, 0x26, 0xd0, 0x7d, 0x7a, 0x94, 0xba, 0xd8, 0x6e, 0x55, 0xe6, 0xfb, + 0x1c, 0xe0, 0x7e, 0xc6, 0xd0, 0x5d, 0x28, 0xb8, 0x7e, 0x93, 0xd0, 0x52, 0x9e, 0x3f, 0x6f, 0xa6, + 0xd5, 0x2d, 0xbb, 0x69, 0xe8, 0x87, 0x98, 0xa9, 0x7c, 0x64, 0x78, 0x8d, 0x6d, 0x9f, 0xf7, 0x2a, + 0x1a, 0xbf, 0x35, 0x3f, 0xc2, 0x01, 0xa9, 0xfc, 0x10, 0xe6, 0xba, 0x9b, 0x06, 0xaa, 0x03, 0xe8, + 0x61, 0x9d, 0xb2, 0x01, 0xc1, 0xcc, 0xbe, 0x3a, 0x7c, 0x56, 0x45, 0x35, 0x1e, 0xf7, 0xcb, 0x48, + 0x44, 0x71, 0x07, 0xb5, 0x7c, 0x06, 0xa6, 0xae, 0xb8, 0xb6, 0xef, 0x88, 0x3b, 0xa2, 0x65, 0xc8, + 0x5b, 0x9a, 0x19, 0x76, 0x9f, 0xa8, 0x23, 0x6e, 0x6a, 0x26, 0xc1, 0xfc, 0x44, 0xfe, 0x46, 0x82, + 0xe9, 0x75, 0xc3, 0x34, 0x3c, 0x4c, 0xa8, 0x63, 0x5b, 0x94, 0xa0, 0xf3, 0x89, 0x8e, 0x75, 0xbc, + 0xab, 0x63, 0x1d, 0x49, 0x80, 0x3b, 0x7a, 0xd5, 0xc7, 0x30, 0xfe, 0xc0, 0x27, 0xbe, 0x61, 0xd5, + 0x45, 0xbf, 0x3e, 0x97, 0xe5, 0xe0, 0x8d, 0x00, 0x9e, 0xc8, 0x36, 0x75, 0x92, 0xb5, 0x00, 0x71, + 0x82, 0x43, 0x46, 0xf9, 0xaf, 0x1c, 0x1c, 0xe7, 0x86, 0x49, 0x6d, 0xc0, 0x54, 0xbe, 0x0b, 0x25, + 0x8d, 0x52, 0xdf, 0x25, 0xb5, 0xb4, 0xa9, 0xbc, 0x2c, 0xbc, 0x29, 0xad, 0xa6, 0xe0, 0x70, 0x2a, + 0x03, 0xba, 0x0f, 0xd3, 0xcd, 0x4e, 0xdf, 0x85, 0x9b, 0xa7, 0xb3, 0xdc, 0x4c, 0x04, 0x4c, 0x5d, + 0x10, 0x37, 0x48, 0x06, 0x1d, 0x27, 0xa9, 0xfb, 0x6d, 0x01, 0xa3, 0xc3, 0x6f, 0x01, 0xe8, 0x3a, + 0x2c, 0xec, 0xd8, 0xae, 0x6b, 0x1f, 0x18, 0x56, 0x9d, 0xdb, 0x09, 0x49, 0xf2, 0x9c, 0xe4, 0x5f, + 0xed, 0x56, 0x65, 0x41, 0xed, 0x07, 0xc0, 0xfd, 0xf5, 0xe4, 0x03, 0x58, 0xd8, 0x64, 0x3d, 0x85, + 0xda, 0xbe, 0xab, 0x93, 0xb8, 0x20, 0x50, 0x05, 0x0a, 0xfb, 0xc4, 0xdd, 0x09, 0x92, 0xba, 0xa8, + 0x16, 0x59, 0x39, 0x7c, 0xc8, 0x04, 0x38, 0x90, 0x33, 0x4f, 0xac, 0x58, 0xf3, 0x16, 0x5e, 0xa7, + 0xa5, 0x31, 0x0e, 0xe5, 0x9e, 0x6c, 0x26, 0x8f, 0x70, 0x37, 0x56, 0x6e, 0xe5, 0x60, 0x31, 0xa5, + 0xfe, 0xd0, 0x2d, 0x98, 0xa0, 0xe2, 0x6f, 0x51, 0x53, 0x27, 0xb2, 0xde, 0x42, 0xe8, 0xc6, 0xdd, + 0x3f, 0x24, 0xc3, 0x11, 0x15, 0xb2, 0x61, 0xda, 0x15, 0x57, 0xe0, 0x36, 0xc5, 0x14, 0x58, 0xc9, + 0xe2, 0xee, 0x8d, 0x4e, 0xfc, 0xd8, 0xb8, 0x93, 0x10, 0x27, 0xf9, 0xd1, 0x43, 0x98, 0xeb, 0x70, + 0x3b, 0xb0, 0x39, 0xca, 0x6d, 0x9e, 0xcf, 0xb2, 0xd9, 0xf7, 0x51, 0xd4, 0x92, 0x30, 0x3b, 0xb7, + 0xd9, 0x45, 0x8b, 0x7b, 0x0c, 0xc9, 0x3f, 0xe7, 0x60, 0xc0, 0x60, 0x78, 
0x09, 0x4b, 0xde, 0xbd, + 0xc4, 0x92, 0xf7, 0xf6, 0xf3, 0x4f, 0xbc, 0xd4, 0xa5, 0xaf, 0xd1, 0xb5, 0xf4, 0xbd, 0xfb, 0x02, + 0x36, 0x06, 0x2f, 0x81, 0x7f, 0xe4, 0xe0, 0x3f, 0xe9, 0xca, 0xf1, 0x52, 0x78, 0x2d, 0xd1, 0x62, + 0x2f, 0x74, 0xb5, 0xd8, 0x13, 0x43, 0x50, 0xfc, 0xb3, 0x24, 0x76, 0x2d, 0x89, 0xbf, 0x48, 0x50, + 0x4e, 0x8f, 0xdb, 0x4b, 0x58, 0x1a, 0x3f, 0x49, 0x2e, 0x8d, 0x6f, 0x3c, 0x7f, 0x92, 0xa5, 0x2c, + 0x91, 0x57, 0x06, 0xe5, 0x56, 0xb4, 0xee, 0x0d, 0x31, 0xf2, 0xbf, 0xcb, 0x0d, 0x0a, 0x15, 0xdf, + 0x4e, 0x33, 0x7e, 0xb5, 0x24, 0xb4, 0x2f, 0x5b, 0x6c, 0xf4, 0x98, 0x6c, 0x7a, 0x04, 0x09, 0xd9, + 0x80, 0xf1, 0x66, 0x30, 0xab, 0x45, 0x51, 0xaf, 0x0e, 0x35, 0x22, 0x07, 0x8d, 0xf6, 0x60, 0x2d, + 0x10, 0x30, 0x1c, 0xd2, 0xa3, 0x1a, 0x8c, 0x11, 0xfe, 0x53, 0x7d, 0xd8, 0xca, 0xce, 0xfa, 0x61, + 0xaf, 0x02, 0xcb, 0xc2, 0x00, 0x85, 0x05, 0xb7, 0xfc, 0xb5, 0x04, 0xcb, 0x59, 0x2d, 0x01, 0x1d, + 0xf4, 0x59, 0xf1, 0x5e, 0x60, 0x7d, 0x1f, 0x7e, 0xe5, 0xfb, 0x5e, 0x82, 0xa3, 0xfd, 0x36, 0x29, + 0x56, 0x64, 0x6c, 0x7d, 0x8a, 0x76, 0x9f, 0xa8, 0xc8, 0x6e, 0x70, 0x29, 0x16, 0xa7, 0xe8, 0x14, + 0x4c, 0x34, 0x34, 0xab, 0xb6, 0x6d, 0x7c, 0x16, 0x6e, 0xf5, 0x51, 0x9a, 0xbf, 0x2f, 0xe4, 0x38, + 0x42, 0xa0, 0x4b, 0x30, 0xc7, 0xf5, 0xd6, 0x89, 0x55, 0xf7, 0x1a, 0xfc, 0x45, 0xc4, 0x6a, 0x12, + 0x4d, 0x9d, 0x1b, 0x5d, 0xe7, 0xb8, 0x47, 0x43, 0xfe, 0x53, 0x02, 0xf4, 0x3c, 0xdb, 0xc4, 0x49, + 0x28, 0x6a, 0x8e, 0xc1, 0x57, 0xdc, 0xa0, 0xd0, 0x8a, 0xea, 0x74, 0xbb, 0x55, 0x29, 0xae, 0x6e, + 0x5d, 0x0d, 0x84, 0x38, 0x3e, 0x67, 0xe0, 0x70, 0xd0, 0x06, 0x03, 0x55, 0x80, 0x43, 0xc3, 0x14, + 0xc7, 0xe7, 0xe8, 0x22, 0x4c, 0xe9, 0x4d, 0x9f, 0x7a, 0xc4, 0xdd, 0xd6, 0x6d, 0x87, 0xf0, 0xc6, + 0x34, 0xa1, 0x1e, 0x15, 0x3e, 0x4d, 0xad, 0x75, 0x9c, 0xe1, 0x04, 0x12, 0x29, 0x00, 0xac, 0xac, + 0xa8, 0xa3, 0x31, 0x3b, 0x05, 0x6e, 0x67, 0x86, 0x3d, 0xd8, 0x66, 0x24, 0xc5, 0x1d, 0x08, 0xf9, + 0x3e, 0x2c, 0x6c, 0x13, 0x77, 0xdf, 0xd0, 0xc9, 0xaa, 0xae, 0xdb, 0xbe, 0xe5, 0x85, 0xcb, 0x7a, + 0x15, 0x8a, 0x11, 0x4c, 0x54, 0xde, 0x11, 0x61, 0xbf, 0x18, 0x71, 0xe1, 0x18, 0x13, 0x95, 0x7a, + 0x2e, 0xb5, 0xd4, 0x7f, 0xca, 0xc1, 0x78, 0x4c, 0x9f, 0xdf, 0x33, 0xac, 0x9a, 0x60, 0x3e, 0x16, + 0xa2, 0xaf, 0x19, 0x56, 0xed, 0x59, 0xab, 0x32, 0x29, 0x60, 0xec, 0x13, 0x73, 0x20, 0xba, 0x0a, + 0x79, 0x9f, 0x12, 0x57, 0x14, 0xf1, 0xc9, 0xac, 0x64, 0xbe, 0x45, 0x89, 0x1b, 0xee, 0x57, 0x13, + 0x8c, 0x99, 0x09, 0x30, 0xa7, 0x40, 0x1b, 0x50, 0xa8, 0xb3, 0x47, 0x11, 0x75, 0x7a, 0x2a, 0x8b, + 0xab, 0xf3, 0x47, 0x4c, 0x90, 0x06, 0x5c, 0x82, 0x03, 0x16, 0xf4, 0x00, 0x66, 0x68, 0x22, 0x84, + 0xfc, 0xb9, 0x86, 0xd8, 0x97, 0xfa, 0x06, 0x5e, 0x45, 0xed, 0x56, 0x65, 0x26, 0x79, 0x84, 0xbb, + 0x0c, 0xc8, 0x55, 0x98, 0xec, 0x70, 0x30, 0xbb, 0xcb, 0xaa, 0x97, 0x1e, 0x3d, 0x2d, 0x8f, 0x3c, + 0x7e, 0x5a, 0x1e, 0x79, 0xf2, 0xb4, 0x3c, 0xf2, 0x79, 0xbb, 0x2c, 0x3d, 0x6a, 0x97, 0xa5, 0xc7, + 0xed, 0xb2, 0xf4, 0xa4, 0x5d, 0x96, 0x7e, 0x6d, 0x97, 0xa5, 0x2f, 0x7f, 0x2b, 0x8f, 0xdc, 0x29, + 0x0f, 0xfe, 0x5f, 0xec, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xd5, 0xd0, 0x62, 0xac, 0x15, + 0x00, 0x00, +} + +func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { @@ -1154,6 +1223,16 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i _ = i var l int _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } { size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -1477,6 +1556,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, _ = i var l int _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } if m.Limited != nil { { size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) @@ -1769,6 +1860,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + func (m *FlowDistinguisherMethod) Size() (n int) { if m == nil { return 0 @@ -1903,6 +2009,12 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } return n } @@ -2028,6 +2140,10 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) { l = m.Limited.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2145,6 +2261,17 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} func (this *FlowDistinguisherMethod) String() string { if this == nil { return "nil" @@ -2258,6 +2385,8 @@ func (this *LimitedPriorityLevelConfiguration) String() string { s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + 
`BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, `}`, }, "") return s @@ -2359,6 +2488,7 @@ func (this *PriorityLevelConfigurationSpec) String() string { s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, `}`, }, "") return s @@ -2446,6 +2576,96 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3542,6 +3762,46 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4485,6 +4745,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index 20c620052..d6073fc92 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -26,7 +26,41 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta2"; +option go_package = "k8s.io/api/flowcontrol/v1beta2"; + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} // FlowDistinguisherMethod specifies the method of a flow distinguisher. message FlowDistinguisherMethod { @@ -42,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -67,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -81,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -158,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces @@ -176,6 +210,35 @@ message LimitedPriorityLevelConfiguration { // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. 
+ // +optional + optional int32 borrowingLimitPercent = 4; } // NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the @@ -232,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -257,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -271,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; @@ -303,6 +366,14 @@ message PriorityLevelConfigurationSpec { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; } // PriorityLevelConfigurationStatus represents the current state of a "request-priority". diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go index 3ee00f293..c66cb173f 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go @@ -77,7 +77,9 @@ const ( // is a boolean false or has an invalid boolean representation // (if the cluster operator sets it to 'false' it will be stomped) // - any changes to the spec made by the cluster operator will be - // stomped. + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". // // The kube-apiserver will apply updates on the suggested configuration if: // - the cluster operator has enabled auto-update by setting the annotation @@ -105,6 +107,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. 
Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -126,6 +129,7 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -380,6 +384,7 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -400,6 +405,7 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.23 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -431,6 +437,14 @@ type PriorityLevelConfigurationSpec struct { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` } // PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level @@ -452,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must + // priority level that may be executing at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces @@ -470,6 +484,72 @@ type LimitedPriorityLevelConfiguration struct { // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. 
+ // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. } // LimitResponse defines how to handle requests that can not be executed right now. 
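As a worked illustration of the borrowing formulas documented in the vendored flowcontrol types above, the following minimal Go sketch applies NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ), LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ), and BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) to hypothetical values. The type, function, and priority-level names and the ServerCL figure are illustrative assumptions only; this is not code from k8s.io/api or the kube-apiserver, just the arithmetic the doc comments describe.

package main

import (
	"fmt"
	"math"
)

// levelShares mirrors, loosely, the per-level knobs documented above:
// nominal shares plus the lendable and borrowing-limit percentages.
type levelShares struct {
	name                  string
	nominalShares         int32  // nominalConcurrencyShares (NCS)
	lendablePercent       int32  // 0..100
	borrowingLimitPercent *int32 // nil means "effectively infinite"
}

// nominalCL computes NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ).
func nominalCL(serverCL int, level levelShares, sumNCS int32) int {
	return int(math.Ceil(float64(serverCL) * float64(level.nominalShares) / float64(sumNCS)))
}

// lendableCL computes LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ).
func lendableCL(nominal int, lendablePercent int32) int {
	return int(math.Round(float64(nominal) * float64(lendablePercent) / 100.0))
}

// borrowingCL computes BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 );
// a nil percentage leaves the limit unbounded, matching the doc comment.
func borrowingCL(nominal int, percent *int32) (int, bool) {
	if percent == nil {
		return 0, false // no explicit borrowing limit
	}
	return int(math.Round(float64(nominal) * float64(*percent) / 100.0)), true
}

func main() {
	serverCL := 600          // hypothetical server concurrency limit (ServerCL)
	borrowCap := int32(150)  // >100 is allowed: the level may borrow more than its own NominalCL
	levels := []levelShares{
		{name: "workload-low", nominalShares: 100, lendablePercent: 90, borrowingLimitPercent: &borrowCap},
		{name: "workload-high", nominalShares: 40, lendablePercent: 50},
		{name: "exempt", nominalShares: 5, lendablePercent: 0},
	}

	var sumNCS int32
	for _, l := range levels {
		sumNCS += l.nominalShares
	}

	for _, l := range levels {
		n := nominalCL(serverCL, l, sumNCS)
		lend := lendableCL(n, l.lendablePercent)
		if b, ok := borrowingCL(n, l.borrowingLimitPercent); ok {
			fmt.Printf("%s: NominalCL=%d LendableCL=%d BorrowingCL=%d\n", l.name, n, lend, b)
		} else {
			fmt.Printf("%s: NominalCL=%d LendableCL=%d BorrowingCL=unlimited\n", l.name, n, lend)
		}
	}
}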
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 7efe477d2..921122731 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -24,9 +24,19 @@ package v1beta2 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + var map_FlowDistinguisherMethod = map[string]string{ "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", @@ -112,8 +122,10 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. 
This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", } func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { @@ -188,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{ "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. 
If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", } func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go index e6288a687..e0605b95d 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go @@ -25,6 +25,32 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. +func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { *out = *in @@ -212,6 +238,16 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } return } @@ -390,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu *out = new(LimitedPriorityLevelConfiguration) (*in).DeepCopyInto(*out) } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go index 00cefde41..2abad4e19 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.prerelease-lifecycle.go @@ -21,6 +21,10 @@ limitations under the License. package v1beta2 +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { @@ -33,6 +37,12 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { @@ -51,6 +61,12 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { @@ -69,6 +85,12 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { @@ -87,6 +109,12 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor return 1, 26 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go new file mode 100644 index 000000000..cd60cfef7 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=flowcontrol.apiserver.k8s.io + +// Package v1beta3 holds api types of version v1beta3 for group "flowcontrol.apiserver.k8s.io". +package v1beta3 // import "k8s.io/api/flowcontrol/v1beta3" diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go new file mode 100644 index 000000000..e0a3fc1e1 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go @@ -0,0 +1,5662 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/flowcontrol/v1beta3/generated.proto + +package v1beta3 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } +func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} +func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{0} +} +func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src) +} +func (m *ExemptPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } +func (*FlowDistinguisherMethod) ProtoMessage() {} +func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{1} +} +func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src) +} +func (m *FlowDistinguisherMethod) XXX_Size() int { + return m.Size() +} +func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() { + xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo + +func (m *FlowSchema) Reset() { *m = FlowSchema{} } +func (*FlowSchema) ProtoMessage() {} +func (*FlowSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{2} +} +func (m *FlowSchema) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchema.Merge(m, src) +} +func (m *FlowSchema) XXX_Size() int { + return m.Size() +} +func (m *FlowSchema) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchema proto.InternalMessageInfo + +func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } +func (*FlowSchemaCondition) ProtoMessage() {} +func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{3} +} +func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaCondition.Merge(m, src) +} +func (m 
*FlowSchemaCondition) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaCondition) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo + +func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } +func (*FlowSchemaList) ProtoMessage() {} +func (*FlowSchemaList) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{4} +} +func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaList.Merge(m, src) +} +func (m *FlowSchemaList) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaList) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaList.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo + +func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } +func (*FlowSchemaSpec) ProtoMessage() {} +func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{5} +} +func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaSpec.Merge(m, src) +} +func (m *FlowSchemaSpec) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo + +func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } +func (*FlowSchemaStatus) ProtoMessage() {} +func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{6} +} +func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlowSchemaStatus.Merge(m, src) +} +func (m *FlowSchemaStatus) XXX_Size() int { + return m.Size() +} +func (m *FlowSchemaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo + +func (m *GroupSubject) Reset() { *m = GroupSubject{} } +func (*GroupSubject) ProtoMessage() {} +func (*GroupSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{7} +} +func (m *GroupSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSubject.Merge(m, src) +} +func (m *GroupSubject) XXX_Size() int { + return m.Size() +} +func (m *GroupSubject) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSubject.DiscardUnknown(m) +} + +var 
xxx_messageInfo_GroupSubject proto.InternalMessageInfo + +func (m *LimitResponse) Reset() { *m = LimitResponse{} } +func (*LimitResponse) ProtoMessage() {} +func (*LimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{8} +} +func (m *LimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitResponse.Merge(m, src) +} +func (m *LimitResponse) XXX_Size() int { + return m.Size() +} +func (m *LimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitResponse proto.InternalMessageInfo + +func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } +func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} +func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{9} +} +func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src) +} +func (m *LimitedPriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo + +func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } +func (*NonResourcePolicyRule) ProtoMessage() {} +func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{10} +} +func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourcePolicyRule.Merge(m, src) +} +func (m *NonResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo + +func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } +func (*PolicyRulesWithSubjects) ProtoMessage() {} +func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{11} +} +func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src) 
+} +func (m *PolicyRulesWithSubjects) XXX_Size() int { + return m.Size() +} +func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo + +func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } +func (*PriorityLevelConfiguration) ProtoMessage() {} +func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{12} +} +func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src) +} +func (m *PriorityLevelConfiguration) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } +func (*PriorityLevelConfigurationCondition) ProtoMessage() {} +func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{13} +} +func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src) +} +func (m *PriorityLevelConfigurationCondition) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } +func (*PriorityLevelConfigurationList) ProtoMessage() {} +func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{14} +} +func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src) +} +func (m *PriorityLevelConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } +func (*PriorityLevelConfigurationReference) ProtoMessage() {} +func 
(*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{15} +} +func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src) +} +func (m *PriorityLevelConfigurationReference) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } +func (*PriorityLevelConfigurationSpec) ProtoMessage() {} +func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{16} +} +func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src) +} +func (m *PriorityLevelConfigurationSpec) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo + +func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } +func (*PriorityLevelConfigurationStatus) ProtoMessage() {} +func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{17} +} +func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src) +} +func (m *PriorityLevelConfigurationStatus) XXX_Size() int { + return m.Size() +} +func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo + +func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } +func (*QueuingConfiguration) ProtoMessage() {} +func (*QueuingConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{18} +} +func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func 
(m *QueuingConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueuingConfiguration.Merge(m, src) +} +func (m *QueuingConfiguration) XXX_Size() int { + return m.Size() +} +func (m *QueuingConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo + +func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } +func (*ResourcePolicyRule) ProtoMessage() {} +func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{19} +} +func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePolicyRule.Merge(m, src) +} +func (m *ResourcePolicyRule) XXX_Size() int { + return m.Size() +} +func (m *ResourcePolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo + +func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } +func (*ServiceAccountSubject) ProtoMessage() {} +func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{20} +} +func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountSubject.Merge(m, src) +} +func (m *ServiceAccountSubject) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountSubject) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{21} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func (m *UserSubject) Reset() { *m = UserSubject{} } +func (*UserSubject) ProtoMessage() {} +func (*UserSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_52ab6629c083d251, []int{22} +} +func (m *UserSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserSubject.Merge(m, src) +} +func (m *UserSubject) 
XXX_Size() int { + return m.Size() +} +func (m *UserSubject) XXX_DiscardUnknown() { + xxx_messageInfo_UserSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_UserSubject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration") + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta3.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta3.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta3.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta3.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta3.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.UserSubject") +} + +func init() { + proto.RegisterFile("k8s.io/api/flowcontrol/v1beta3/generated.proto", fileDescriptor_52ab6629c083d251) +} + +var fileDescriptor_52ab6629c083d251 = []byte{ + // 1589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x6f, 0xdc, 0x54, + 0x17, 0x8f, 0x27, 0x33, 0x49, 0xe6, 0xe4, 0xd9, 0x9b, 0x46, 0x99, 0x2f, 0xfd, 0x34, 0x93, 0xfa, + 0x93, 0xbe, 0x02, 0x6d, 0x3d, 0x7d, 0xd2, 0x02, 0xe2, 0x51, 0xa7, 0xa5, 0x94, 0x26, 0x69, 0x7a, + 0xd3, 0x42, 0x55, 0x2a, 0x51, 0xc7, 0x73, 0xe3, 0x71, 0x33, 0x7e, 0xd4, 0xd7, 0x4e, 0x08, 0xdd, + 0x20, 0xfe, 0x02, 0xd6, 0xb0, 0x64, 0xc1, 0x8a, 0x0d, 0x5b, 0x16, 0x2c, 0xa9, 0x58, 0x75, 0xd9, + 0xd5, 0x40, 0x87, 0x15, 0xff, 0x01, 0x54, 0x42, 0x42, 0xf7, 0xfa, 0xda, 0x1e, 0xcf, 0xcb, 0xa3, + 0x54, 0xea, 0x8a, 
0x5d, 0x7c, 0xee, 0x39, 0xbf, 0x73, 0xcf, 0xb9, 0xe7, 0xf1, 0x9b, 0x80, 0xb2, + 0x73, 0x91, 0x2a, 0xa6, 0x53, 0xd5, 0x5c, 0xb3, 0xba, 0xdd, 0x70, 0xf6, 0x74, 0xc7, 0xf6, 0x3d, + 0xa7, 0x51, 0xdd, 0x3d, 0xbd, 0x45, 0x7c, 0xed, 0x6c, 0xd5, 0x20, 0x36, 0xf1, 0x34, 0x9f, 0xd4, + 0x14, 0xd7, 0x73, 0x7c, 0x07, 0x95, 0x43, 0x7d, 0x45, 0x73, 0x4d, 0xa5, 0x4d, 0x5f, 0x11, 0xfa, + 0x4b, 0x27, 0x0d, 0xd3, 0xaf, 0x07, 0x5b, 0x8a, 0xee, 0x58, 0x55, 0xc3, 0x31, 0x9c, 0x2a, 0x37, + 0xdb, 0x0a, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x85, 0x70, 0x4b, 0xe7, 0x12, 0xf7, 0x96, 0xa6, + 0xd7, 0x4d, 0x9b, 0x78, 0xfb, 0x55, 0x77, 0xc7, 0x60, 0x02, 0x5a, 0xb5, 0x88, 0xaf, 0x55, 0x77, + 0x4f, 0x77, 0x5e, 0x62, 0xa9, 0xda, 0xcf, 0xca, 0x0b, 0x6c, 0xdf, 0xb4, 0x48, 0x97, 0xc1, 0xeb, + 0x59, 0x06, 0x54, 0xaf, 0x13, 0x4b, 0xeb, 0xb4, 0x93, 0x7f, 0x94, 0x60, 0xf9, 0xca, 0x67, 0xc4, + 0x72, 0xfd, 0x0d, 0xcf, 0x74, 0x3c, 0xd3, 0xdf, 0x5f, 0x25, 0xbb, 0xa4, 0xb1, 0xe2, 0xd8, 0xdb, + 0xa6, 0x11, 0x78, 0x9a, 0x6f, 0x3a, 0x36, 0xba, 0x03, 0x25, 0xdb, 0xb1, 0x4c, 0x5b, 0x63, 0x72, + 0x3d, 0xf0, 0x3c, 0x62, 0xeb, 0xfb, 0x9b, 0x75, 0xcd, 0x23, 0xb4, 0x24, 0x2d, 0x4b, 0xaf, 0x14, + 0xd4, 0xff, 0xb6, 0x9a, 0x95, 0xd2, 0x7a, 0x1f, 0x1d, 0xdc, 0xd7, 0x1a, 0xbd, 0x0d, 0xb3, 0x0d, + 0x62, 0xd7, 0xb4, 0xad, 0x06, 0xd9, 0x20, 0x9e, 0x4e, 0x6c, 0xbf, 0x94, 0xe3, 0x80, 0xf3, 0xad, + 0x66, 0x65, 0x76, 0x35, 0x7d, 0x84, 0x3b, 0x75, 0xe5, 0xbb, 0xb0, 0xf8, 0x7e, 0xc3, 0xd9, 0xbb, + 0x6c, 0x52, 0xdf, 0xb4, 0x8d, 0xc0, 0xa4, 0x75, 0xe2, 0xad, 0x11, 0xbf, 0xee, 0xd4, 0xd0, 0xbb, + 0x90, 0xf7, 0xf7, 0x5d, 0xc2, 0xef, 0x57, 0x54, 0x8f, 0x3f, 0x6e, 0x56, 0x46, 0x5a, 0xcd, 0x4a, + 0xfe, 0xd6, 0xbe, 0x4b, 0x9e, 0x37, 0x2b, 0x47, 0xfa, 0x98, 0xb1, 0x63, 0xcc, 0x0d, 0xe5, 0xaf, + 0x73, 0x00, 0x4c, 0x6b, 0x93, 0x27, 0x0e, 0xdd, 0x87, 0x09, 0xf6, 0x58, 0x35, 0xcd, 0xd7, 0x38, + 0xe6, 0xe4, 0x99, 0x53, 0x4a, 0x52, 0x29, 0x71, 0xce, 0x15, 0x77, 0xc7, 0x60, 0x02, 0xaa, 0x30, + 0x6d, 0x65, 0xf7, 0xb4, 0x72, 0x63, 0xeb, 0x01, 0xd1, 0xfd, 0x35, 0xe2, 0x6b, 0x2a, 0x12, 0xb7, + 0x80, 0x44, 0x86, 0x63, 0x54, 0xb4, 0x01, 0x79, 0xea, 0x12, 0x9d, 0x27, 0x60, 0xf2, 0x8c, 0xa2, + 0x0c, 0xae, 0x43, 0x25, 0xb9, 0xdb, 0xa6, 0x4b, 0x74, 0x75, 0x2a, 0x8a, 0x90, 0x7d, 0x61, 0x8e, + 0x84, 0xee, 0xc0, 0x18, 0xf5, 0x35, 0x3f, 0xa0, 0xa5, 0xd1, 0xae, 0x1b, 0x67, 0x61, 0x72, 0x3b, + 0x75, 0x46, 0xa0, 0x8e, 0x85, 0xdf, 0x58, 0xe0, 0xc9, 0x4f, 0x73, 0x30, 0x9f, 0x28, 0xaf, 0x38, + 0x76, 0xcd, 0xe4, 0x95, 0xf2, 0x56, 0x2a, 0xeb, 0xc7, 0x3a, 0xb2, 0xbe, 0xd8, 0xc3, 0x24, 0xc9, + 0x38, 0x7a, 0x23, 0xbe, 0x6e, 0x8e, 0x9b, 0x1f, 0x4d, 0x3b, 0x7f, 0xde, 0xac, 0xcc, 0xc6, 0x66, + 0xe9, 0xfb, 0xa0, 0x5d, 0x40, 0x0d, 0x8d, 0xfa, 0xb7, 0x3c, 0xcd, 0xa6, 0x21, 0xac, 0x69, 0x11, + 0x11, 0xf5, 0x6b, 0xc3, 0xbd, 0x13, 0xb3, 0x50, 0x97, 0x84, 0x4b, 0xb4, 0xda, 0x85, 0x86, 0x7b, + 0x78, 0x40, 0xff, 0x87, 0x31, 0x8f, 0x68, 0xd4, 0xb1, 0x4b, 0x79, 0x7e, 0xe5, 0x38, 0x5f, 0x98, + 0x4b, 0xb1, 0x38, 0x45, 0xaf, 0xc2, 0xb8, 0x45, 0x28, 0xd5, 0x0c, 0x52, 0x2a, 0x70, 0xc5, 0x59, + 0xa1, 0x38, 0xbe, 0x16, 0x8a, 0x71, 0x74, 0x2e, 0xff, 0x24, 0xc1, 0x4c, 0x92, 0xa7, 0x55, 0x93, + 0xfa, 0xe8, 0x5e, 0x57, 0xed, 0x29, 0xc3, 0xc5, 0xc4, 0xac, 0x79, 0xe5, 0xcd, 0x09, 0x77, 0x13, + 0x91, 0xa4, 0xad, 0xee, 0x6e, 0x40, 0xc1, 0xf4, 0x89, 0xc5, 0xb2, 0x3e, 0xda, 0x91, 0xae, 0x8c, + 0x22, 0x51, 0xa7, 0x05, 0x6c, 0xe1, 0x1a, 0x03, 0xc0, 0x21, 0x8e, 0xfc, 0xc7, 0x68, 0x7b, 0x04, + 0xac, 0x1e, 0xd1, 0x77, 0x12, 0x2c, 0xb9, 0x7d, 0x07, 0x8c, 0x08, 0x6a, 0x25, 0xcb, 0x73, 0xff, + 0x11, 0x85, 0xc9, 0x36, 0x61, 0x73, 0x85, 
0xa8, 0xb2, 0xb8, 0xd2, 0xd2, 0x00, 0xe5, 0x01, 0x57, + 0x41, 0x1f, 0x02, 0xb2, 0x34, 0x9f, 0x65, 0xd4, 0xd8, 0xf0, 0x88, 0x4e, 0x6a, 0x0c, 0x55, 0x0c, + 0xa5, 0xb8, 0x3a, 0xd6, 0xba, 0x34, 0x70, 0x0f, 0x2b, 0xf4, 0xa5, 0x04, 0xf3, 0xb5, 0xee, 0x21, + 0x23, 0xea, 0xf2, 0xc2, 0x30, 0x89, 0xee, 0x31, 0xa3, 0xd4, 0xc5, 0x56, 0xb3, 0x32, 0xdf, 0xe3, + 0x00, 0xf7, 0x72, 0x86, 0xee, 0x41, 0xc1, 0x0b, 0x1a, 0x84, 0x96, 0xf2, 0xfc, 0x79, 0x33, 0xbd, + 0x6e, 0x38, 0x0d, 0x53, 0xdf, 0xc7, 0xcc, 0xe4, 0x63, 0xd3, 0xaf, 0x6f, 0x06, 0x7c, 0x56, 0xd1, + 0xe4, 0xad, 0xf9, 0x11, 0x0e, 0x41, 0xe5, 0x47, 0x30, 0xd7, 0x39, 0x34, 0x90, 0x01, 0xa0, 0x47, + 0x7d, 0xca, 0x16, 0x04, 0x73, 0x7b, 0x76, 0xf8, 0xaa, 0x8a, 0x7b, 0x3c, 0x99, 0x97, 0xb1, 0x88, + 0xe2, 0x36, 0x68, 0xf9, 0x14, 0x4c, 0x5d, 0xf5, 0x9c, 0xc0, 0x15, 0x77, 0x44, 0xcb, 0x90, 0xb7, + 0x35, 0x2b, 0x9a, 0x3e, 0xf1, 0x44, 0x5c, 0xd7, 0x2c, 0x82, 0xf9, 0x89, 0xfc, 0xad, 0x04, 0xd3, + 0xab, 0xa6, 0x65, 0xfa, 0x98, 0x50, 0xd7, 0xb1, 0x29, 0x41, 0xe7, 0x53, 0x13, 0xeb, 0x68, 0xc7, + 0xc4, 0x3a, 0x94, 0x52, 0x6e, 0x9b, 0x55, 0x9f, 0xc0, 0xf8, 0xc3, 0x80, 0x04, 0xa6, 0x6d, 0x88, + 0x79, 0x7d, 0x2e, 0x2b, 0xc0, 0x9b, 0xa1, 0x7a, 0xaa, 0xda, 0xd4, 0x49, 0x36, 0x02, 0xc4, 0x09, + 0x8e, 0x10, 0xe5, 0xbf, 0x73, 0x70, 0x94, 0x3b, 0x26, 0xb5, 0x01, 0x5b, 0xf9, 0x5e, 0xe6, 0x56, + 0x5e, 0x16, 0xd1, 0x1c, 0x64, 0x33, 0x3f, 0x80, 0xe9, 0x46, 0x7b, 0xec, 0x22, 0xcc, 0x93, 0x59, + 0x61, 0xa6, 0x12, 0xa6, 0x2e, 0x88, 0x1b, 0xa4, 0x93, 0x8e, 0xd3, 0xd0, 0xbd, 0x58, 0xc0, 0xe8, + 0xf0, 0x2c, 0x00, 0xdd, 0x80, 0x85, 0x2d, 0xc7, 0xf3, 0x9c, 0x3d, 0xd3, 0x36, 0xb8, 0x9f, 0x08, + 0x24, 0xcf, 0x41, 0xfe, 0xd3, 0x6a, 0x56, 0x16, 0xd4, 0x5e, 0x0a, 0xb8, 0xb7, 0x9d, 0xbc, 0x07, + 0x0b, 0xeb, 0x6c, 0xa6, 0x50, 0x27, 0xf0, 0x74, 0x92, 0x34, 0x04, 0xaa, 0x40, 0x61, 0x97, 0x78, + 0x5b, 0x61, 0x51, 0x17, 0xd5, 0x22, 0x6b, 0x87, 0x8f, 0x98, 0x00, 0x87, 0x72, 0x16, 0x89, 0x9d, + 0x58, 0xde, 0xc6, 0xab, 0xb4, 0x34, 0xc6, 0x55, 0x79, 0x24, 0xeb, 0xe9, 0x23, 0xdc, 0xa9, 0x2b, + 0x37, 0x73, 0xb0, 0xd8, 0xa7, 0xff, 0xd0, 0x6d, 0x98, 0xa0, 0xe2, 0x6f, 0xd1, 0x53, 0xc7, 0xb2, + 0xde, 0x42, 0xd8, 0x26, 0xd3, 0x3f, 0x02, 0xc3, 0x31, 0x14, 0x72, 0x60, 0xda, 0x13, 0x57, 0xe0, + 0x3e, 0xc5, 0x16, 0x38, 0x93, 0x85, 0xdd, 0x9d, 0x9d, 0xe4, 0xb1, 0x71, 0x3b, 0x20, 0x4e, 0xe3, + 0xa3, 0x47, 0x30, 0xd7, 0x16, 0x76, 0xe8, 0x73, 0x94, 0xfb, 0x3c, 0x9f, 0xe5, 0xb3, 0xe7, 0xa3, + 0xa8, 0x25, 0xe1, 0x76, 0x6e, 0xbd, 0x03, 0x16, 0x77, 0x39, 0x92, 0x7f, 0xc9, 0xc1, 0x80, 0xc5, + 0xf0, 0x12, 0x48, 0xde, 0xfd, 0x14, 0xc9, 0x7b, 0xe7, 0xe0, 0x1b, 0xaf, 0x2f, 0xe9, 0xab, 0x77, + 0x90, 0xbe, 0xf7, 0x5e, 0xc0, 0xc7, 0x60, 0x12, 0xf8, 0x67, 0x0e, 0xfe, 0xd7, 0xdf, 0x38, 0x21, + 0x85, 0xd7, 0x53, 0x23, 0xf6, 0x42, 0xc7, 0x88, 0x3d, 0x36, 0x04, 0xc4, 0xbf, 0x24, 0xb1, 0x83, + 0x24, 0xfe, 0x2a, 0x41, 0xb9, 0x7f, 0xde, 0x5e, 0x02, 0x69, 0xfc, 0x34, 0x4d, 0x1a, 0xdf, 0x3c, + 0x78, 0x91, 0xf5, 0x21, 0x91, 0x57, 0x07, 0xd5, 0x56, 0x4c, 0xf7, 0x86, 0x58, 0xf9, 0xdf, 0xe7, + 0x06, 0xa5, 0x8a, 0xb3, 0xd3, 0x8c, 0x5f, 0x2d, 0x29, 0xeb, 0x2b, 0x36, 0x5b, 0x3d, 0x16, 0xdb, + 0x1e, 0x61, 0x41, 0xd6, 0x61, 0xbc, 0x11, 0xee, 0x6a, 0xd1, 0xd4, 0x97, 0x86, 0x5a, 0x91, 0x83, + 0x56, 0x7b, 0x48, 0x0b, 0x84, 0x1a, 0x8e, 0xe0, 0x51, 0x0d, 0xc6, 0x08, 0xff, 0xa9, 0x3e, 0x6c, + 0x67, 0x67, 0xfd, 0xb0, 0x57, 0x81, 0x55, 0x61, 0xa8, 0x85, 0x05, 0xb6, 0xfc, 0x8d, 0x04, 0xcb, + 0x59, 0x23, 0x01, 0xed, 0xf5, 0xa0, 0x78, 0x2f, 0x40, 0xdf, 0x87, 0xa7, 0x7c, 0x3f, 0x48, 0x70, + 0xb8, 0x17, 0x93, 0x62, 0x4d, 0xc6, 0xe8, 0x53, 0xcc, 0x7d, 0xe2, 
0x26, 0xbb, 0xc9, 0xa5, 0x58, + 0x9c, 0xa2, 0x13, 0x30, 0x51, 0xd7, 0xec, 0xda, 0xa6, 0xf9, 0x79, 0xc4, 0xea, 0xe3, 0x32, 0xff, + 0x40, 0xc8, 0x71, 0xac, 0x81, 0x2e, 0xc3, 0x1c, 0xb7, 0x5b, 0x25, 0xb6, 0xe1, 0xd7, 0xf9, 0x8b, + 0x08, 0x6a, 0x12, 0x6f, 0x9d, 0x9b, 0x1d, 0xe7, 0xb8, 0xcb, 0x42, 0xfe, 0x4b, 0x02, 0x74, 0x10, + 0x36, 0x71, 0x1c, 0x8a, 0x9a, 0x6b, 0x72, 0x8a, 0x1b, 0x36, 0x5a, 0x51, 0x9d, 0x6e, 0x35, 0x2b, + 0xc5, 0x4b, 0x1b, 0xd7, 0x42, 0x21, 0x4e, 0xce, 0x99, 0x72, 0xb4, 0x68, 0xc3, 0x85, 0x2a, 0x94, + 0x23, 0xc7, 0x14, 0x27, 0xe7, 0xe8, 0x22, 0x4c, 0xe9, 0x8d, 0x80, 0xfa, 0xc4, 0xdb, 0xd4, 0x1d, + 0x97, 0xf0, 0xc1, 0x34, 0xa1, 0x1e, 0x16, 0x31, 0x4d, 0xad, 0xb4, 0x9d, 0xe1, 0x94, 0x26, 0x52, + 0x00, 0x58, 0x5b, 0x51, 0x57, 0x63, 0x7e, 0x0a, 0xdc, 0xcf, 0x0c, 0x7b, 0xb0, 0xf5, 0x58, 0x8a, + 0xdb, 0x34, 0xe4, 0x07, 0xb0, 0xb0, 0x49, 0xbc, 0x5d, 0x53, 0x27, 0x97, 0x74, 0xdd, 0x09, 0x6c, + 0x3f, 0x22, 0xeb, 0x55, 0x28, 0xc6, 0x6a, 0xa2, 0xf3, 0x0e, 0x09, 0xff, 0xc5, 0x18, 0x0b, 0x27, + 0x3a, 0x71, 0xab, 0xe7, 0xfa, 0xb6, 0xfa, 0xcf, 0x39, 0x18, 0x4f, 0xe0, 0xf3, 0x3b, 0xa6, 0x5d, + 0x13, 0xc8, 0x47, 0x22, 0xed, 0xeb, 0xa6, 0x5d, 0x7b, 0xde, 0xac, 0x4c, 0x0a, 0x35, 0xf6, 0x89, + 0xb9, 0x22, 0xba, 0x06, 0xf9, 0x80, 0x12, 0x4f, 0x34, 0xf1, 0xf1, 0xac, 0x62, 0xbe, 0x4d, 0x89, + 0x17, 0xf1, 0xab, 0x09, 0x86, 0xcc, 0x04, 0x98, 0x43, 0xa0, 0x35, 0x28, 0x18, 0xec, 0x51, 0x44, + 0x9f, 0x9e, 0xc8, 0xc2, 0x6a, 0xff, 0x11, 0x13, 0x96, 0x01, 0x97, 0xe0, 0x10, 0x05, 0x3d, 0x84, + 0x19, 0x9a, 0x4a, 0x21, 0x7f, 0xae, 0x21, 0xf8, 0x52, 0xcf, 0xc4, 0xab, 0xa8, 0xd5, 0xac, 0xcc, + 0xa4, 0x8f, 0x70, 0x87, 0x03, 0xb9, 0x0a, 0x93, 0x6d, 0x01, 0x66, 0x4f, 0x59, 0xf5, 0xf2, 0xe3, + 0x67, 0xe5, 0x91, 0x27, 0xcf, 0xca, 0x23, 0x4f, 0x9f, 0x95, 0x47, 0xbe, 0x68, 0x95, 0xa5, 0xc7, + 0xad, 0xb2, 0xf4, 0xa4, 0x55, 0x96, 0x9e, 0xb6, 0xca, 0xd2, 0x6f, 0xad, 0xb2, 0xf4, 0xd5, 0xef, + 0xe5, 0x91, 0xbb, 0xe5, 0xc1, 0xff, 0x8b, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x03, 0x5d, 0xec, + 0x01, 0xac, 0x15, 0x00, 0x00, +} + +func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x10 + } + if m.NominalConcurrencyShares != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowDistinguisherMethod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowDistinguisherMethod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchema) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchema) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchema) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaSpec) MarshalTo(dAtA []byte) (int, error) { + size 
:= m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.DistinguisherMethod != nil { + { + size, err := m.DistinguisherMethod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MatchingPrecedence)) + i-- + dAtA[i] = 0x10 + { + size, err := m.PriorityLevelConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlowSchemaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlowSchemaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlowSchemaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Queuing != nil { + { + size, err := m.Queuing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitedPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *LimitedPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BorrowingLimitPercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BorrowingLimitPercent)) + i-- + dAtA[i] = 0x20 + } + if m.LendablePercent != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.LimitResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.NominalConcurrencyShares)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *NonResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PolicyRulesWithSubjects) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRulesWithSubjects) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRulesWithSubjects) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*PriorityLevelConfigurationReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Exempt != nil { + { + size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Limited != nil { + { + size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityLevelConfigurationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityLevelConfigurationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityLevelConfigurationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueuingConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueuingConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueuingConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.QueueLengthLimit)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HandSize)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Queues)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourcePolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *ResourcePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ClusterScope { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceAccountSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccount != nil { + { + size, err := m.ServiceAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *UserSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExemptPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NominalConcurrencyShares != nil { + n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) + } + if m.LendablePercent != nil { + n += 1 + sovGenerated(uint64(*m.LendablePercent)) + } + return n +} + +func (m *FlowDistinguisherMethod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchema) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FlowSchemaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PriorityLevelConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MatchingPrecedence)) + if m.DistinguisherMethod != nil { + l = m.DistinguisherMethod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlowSchemaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Queuing != nil { + l = m.Queuing.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitedPriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.NominalConcurrencyShares)) + l = m.LimitResponse.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.LendablePercent != nil { + n 
+= 1 + sovGenerated(uint64(*m.LendablePercent)) + } + if m.BorrowingLimitPercent != nil { + n += 1 + sovGenerated(uint64(*m.BorrowingLimitPercent)) + } + return n +} + +func (m *NonResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRulesWithSubjects) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PriorityLevelConfigurationReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityLevelConfigurationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Limited != nil { + l = m.Limited.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Exempt != nil { + l = m.Exempt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityLevelConfigurationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *QueuingConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Queues)) + n += 1 + sovGenerated(uint64(m.HandSize)) + n += 1 + sovGenerated(uint64(m.QueueLengthLimit)) + return n +} + +func (m *ResourcePolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = 
len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccount != nil { + l = m.ServiceAccount.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ExemptPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `}`, + }, "") + return s +} +func (this *FlowDistinguisherMethod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowDistinguisherMethod{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchema) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchema{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "FlowSchemaSpec", "FlowSchemaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "FlowSchemaStatus", "FlowSchemaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlowSchemaCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]FlowSchema{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "FlowSchema", "FlowSchema", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&FlowSchemaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaSpec) String() string { + if this == nil { + return "nil" 
+ } + repeatedStringForRules := "[]PolicyRulesWithSubjects{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRulesWithSubjects", "PolicyRulesWithSubjects", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&FlowSchemaSpec{`, + `PriorityLevelConfiguration:` + strings.Replace(strings.Replace(this.PriorityLevelConfiguration.String(), "PriorityLevelConfigurationReference", "PriorityLevelConfigurationReference", 1), `&`, ``, 1) + `,`, + `MatchingPrecedence:` + fmt.Sprintf("%v", this.MatchingPrecedence) + `,`, + `DistinguisherMethod:` + strings.Replace(this.DistinguisherMethod.String(), "FlowDistinguisherMethod", "FlowDistinguisherMethod", 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *FlowSchemaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]FlowSchemaCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "FlowSchemaCondition", "FlowSchemaCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&FlowSchemaStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *GroupSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LimitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitResponse{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Queuing:` + strings.Replace(this.Queuing.String(), "QueuingConfiguration", "QueuingConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitedPriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, + `NominalConcurrencyShares:` + fmt.Sprintf("%v", this.NominalConcurrencyShares) + `,`, + `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, + `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, + `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRulesWithSubjects) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + repeatedStringForResourceRules := "[]ResourcePolicyRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourcePolicyRule", "ResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourcePolicyRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += 
strings.Replace(strings.Replace(f.String(), "NonResourcePolicyRule", "NonResourcePolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&PolicyRulesWithSubjects{`, + `Subjects:` + repeatedStringForSubjects + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PriorityLevelConfigurationSpec", "PriorityLevelConfigurationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PriorityLevelConfigurationStatus", "PriorityLevelConfigurationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityLevelConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfiguration", "PriorityLevelConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`, + `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityLevelConfigurationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PriorityLevelConfigurationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PriorityLevelConfigurationCondition", "PriorityLevelConfigurationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&PriorityLevelConfigurationStatus{`, + 
`Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *QueuingConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QueuingConfiguration{`, + `Queues:` + fmt.Sprintf("%v", this.Queues) + `,`, + `HandSize:` + fmt.Sprintf("%v", this.HandSize) + `,`, + `QueueLengthLimit:` + fmt.Sprintf("%v", this.QueueLengthLimit) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ClusterScope:` + fmt.Sprintf("%v", this.ClusterScope) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountSubject{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `User:` + strings.Replace(this.User.String(), "UserSubject", "UserSubject", 1) + `,`, + `Group:` + strings.Replace(this.Group.String(), "GroupSubject", "GroupSubject", 1) + `,`, + `ServiceAccount:` + strings.Replace(this.ServiceAccount.String(), "ServiceAccountSubject", "ServiceAccountSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserSubject{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NominalConcurrencyShares = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowDistinguisherMethod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowDistinguisherMethod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowDistinguisherMethodType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchema) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FlowSchemaConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, FlowSchema{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityLevelConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PriorityLevelConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchingPrecedence", wireType) + } + m.MatchingPrecedence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchingPrecedence |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DistinguisherMethod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DistinguisherMethod == nil { + m.DistinguisherMethod = &FlowDistinguisherMethod{} + } + if err := m.DistinguisherMethod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRulesWithSubjects{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlowSchemaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlowSchemaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlowSchemaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, FlowSchemaCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupSubject) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitResponseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queuing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Queuing == nil { + m.Queuing = &QueuingConfiguration{} + } + if err := m.Queuing.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitedPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + } + m.NominalConcurrencyShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NominalConcurrencyShares |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LimitResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LendablePercent = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BorrowingLimitPercent", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BorrowingLimitPercent = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRulesWithSubjects) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRulesWithSubjects: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourcePolicyRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourcePolicyRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelConfigurationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityLevelConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PriorityLevelEnablement(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limited", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limited == nil { + m.Limited = &LimitedPriorityLevelConfiguration{} + } + if err := m.Limited.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exempt == nil { + m.Exempt = &ExemptPriorityLevelConfiguration{} + } + if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityLevelConfigurationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityLevelConfigurationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PriorityLevelConfigurationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueuingConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueuingConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueuingConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Queues", wireType) + } + m.Queues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Queues |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HandSize", wireType) + } + m.HandSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HandSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueLengthLimit", wireType) + } + m.QueueLengthLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueueLengthLimit |= 
int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterScope", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ClusterScope = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = SubjectKind(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserSubject{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &GroupSubject{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccount == nil { + m.ServiceAccount = &ServiceAccountSubject{} + } + if err := 
m.ServiceAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto similarity index 77% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto rename to vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto index 50a10e4b3..c6504d435 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto @@ -19,14 +19,48 @@ limitations under the License. syntax = "proto2"; -package k8s.io.api.flowcontrol.v1alpha1; +package k8s.io.api.flowcontrol.v1beta3; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "k8s.io/api/flowcontrol/v1beta3"; + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +message ExemptPriorityLevelConfiguration { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + optional int32 nominalConcurrencyShares = 1; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 2; +} // FlowDistinguisherMethod specifies the method of a flow distinguisher. message FlowDistinguisherMethod { @@ -42,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. 
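The ExemptPriorityLevelConfiguration comments in generated.proto above spell out how a priority level's nominal and lendable concurrency limits are derived from its shares: NominalCL(i) = ceil(ServerCL * NCS(i) / sum_ncs) and LendableCL(i) = round(NominalCL(i) * lendablePercent(i)/100.0). A minimal Go sketch of that arithmetic follows; it is not part of the vendored diff, and the function names and sample numbers are invented for illustration only.

package main

import (
	"fmt"
	"math"
)

// nominalCL computes NominalCL(i) = ceil(ServerCL * NCS(i) / sum_ncs),
// following the nominalConcurrencyShares comment in generated.proto above.
func nominalCL(serverCL int32, ncs []int32, i int) int32 {
	var sum int32
	for _, s := range ncs {
		sum += s
	}
	return int32(math.Ceil(float64(serverCL) * float64(ncs[i]) / float64(sum)))
}

// lendableCL computes LendableCL(i) = round(NominalCL(i) * lendablePercent(i)/100.0),
// following the lendablePercent comment above.
func lendableCL(nominal, lendablePercent int32) int32 {
	return int32(math.Round(float64(nominal) * float64(lendablePercent) / 100.0))
}

func main() {
	serverCL := int32(600)         // assumed total server concurrency limit
	shares := []int32{30, 40, 30}  // assumed NCS values for three priority levels
	lendable := []int32{0, 50, 25} // assumed lendablePercent values
	for i := range shares {
		n := nominalCL(serverCL, shares, i)
		fmt.Printf("level %d: NominalCL=%d LendableCL=%d\n", i, n, lendableCL(n, lendable[i]))
	}
}

Because sum_ncs runs over every priority level, raising one level's shares shrinks every other level's nominal limit, which is exactly the trade-off the proto comments describe.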
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -67,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -81,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -119,6 +153,8 @@ message FlowSchemaStatus { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional repeated FlowSchemaCondition conditions = 1; } @@ -156,26 +192,55 @@ message LimitResponse { // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { - // `assuredConcurrencyShares` (ACS) configures the execution - // limit, which is a limit on the number of requests of this - // priority level that may be exeucting at a given time. ACS must - // be a positive number. The server's concurrency limit (SCL) is - // divided among the concurrency-controlled priority levels in - // proportion to their assured concurrency shares. This produces - // the assured concurrency value (ACV) --- the number of requests - // that may be executing at a time --- for each such priority - // level: + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: // - // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) // - // bigger numbers of ACS mean more reserved concurrent requests (at the - // expense of every other PL). + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. // This field has a default value of 30. // +optional - optional int32 assuredConcurrencyShares = 1; + optional int32 nominalConcurrencyShares = 1; // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. 
+ // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + optional int32 lendablePercent = 3; + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + optional int32 borrowingLimitPercent = 4; } // NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the @@ -232,7 +297,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -257,7 +322,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -271,7 +336,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; @@ -303,6 +368,14 @@ message PriorityLevelConfigurationSpec { // This field must be non-empty if and only if `type` is `"Limited"`. // +optional optional LimitedPriorityLevelConfiguration limited = 2; + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + optional ExemptPriorityLevelConfiguration exempt = 3; } // PriorityLevelConfigurationStatus represents the current state of a "request-priority". @@ -310,6 +383,8 @@ message PriorityLevelConfigurationStatus { // `conditions` is the current state of "request-priority". 
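The LimitedPriorityLevelConfiguration fields documented above (nominalConcurrencyShares, lendablePercent, borrowingLimitPercent, limitResponse) and the limited/exempt union on PriorityLevelConfigurationSpec fit together as in the sketch below. This is an illustrative construction only, assuming the v1beta3 Go types vendored by this diff; the object name and all numbers are made up, and the enum constant names (PriorityLevelEnablementLimited, LimitResponseTypeQueue) come from the upstream package rather than from this hunk.

package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePriorityLevel builds a Limited priority level whose concurrency is
// governed by the shares/percent fields documented above and which queues
// requests that exceed its limit.
func examplePriorityLevel() *flowcontrolv1beta3.PriorityLevelConfiguration {
	lendablePercent := int32(50)        // other levels may borrow up to half of this level's NominalCL
	borrowingLimitPercent := int32(200) // this level may borrow up to 2x its own NominalCL
	return &flowcontrolv1beta3.PriorityLevelConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: flowcontrolv1beta3.PriorityLevelConfigurationSpec{
			Type: flowcontrolv1beta3.PriorityLevelEnablementLimited,
			Limited: &flowcontrolv1beta3.LimitedPriorityLevelConfiguration{
				NominalConcurrencyShares: 100,
				LendablePercent:          &lendablePercent,
				BorrowingLimitPercent:    &borrowingLimitPercent,
				LimitResponse: flowcontrolv1beta3.LimitResponse{
					Type: flowcontrolv1beta3.LimitResponseTypeQueue,
					Queuing: &flowcontrolv1beta3.QueuingConfiguration{
						Queues:           64,
						HandSize:         6,
						QueueLengthLimit: 50,
					},
				},
			},
		},
	}
}

func main() {
	plc := examplePriorityLevel()
	fmt.Printf("%s: type=%s shares=%d\n", plc.Name, plc.Spec.Type, plc.Spec.Limited.NominalConcurrencyShares)
}

For an exempt level, Type would instead be the Exempt enablement value and the Exempt field (the ExemptPriorityLevelConfiguration message shown earlier) would be populated in place of Limited, per the mutual-exclusion rules in the spec comments above.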
// +listType=map // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge // +optional repeated PriorityLevelConfigurationCondition conditions = 1; } diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/register.go b/vendor/k8s.io/api/flowcontrol/v1beta3/register.go new file mode 100644 index 000000000..baf10b182 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the name of api group +const GroupName = "flowcontrol.apiserver.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta3"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder installs the api group to a scheme + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds api to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &FlowSchema{}, + &FlowSchemaList{}, + &PriorityLevelConfiguration{}, + &PriorityLevelConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go new file mode 100644 index 000000000..0ffc22a23 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go @@ -0,0 +1,677 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// These are valid wildcards. 
+const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + NameAll = "*" + + NamespaceEvery = "*" // matches every particular namespace +) + +// System preset priority level names +const ( + PriorityLevelConfigurationNameExempt = "exempt" + PriorityLevelConfigurationNameCatchAll = "catch-all" + FlowSchemaNameExempt = "exempt" + FlowSchemaNameCatchAll = "catch-all" +) + +// Conditions +const ( + FlowSchemaConditionDangling = "Dangling" + + PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared" +) + +// Constants used by api validation. +const ( + FlowSchemaMaxMatchingPrecedence int32 = 10000 +) + +// Constants for apiserver response headers. +const ( + ResponseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID" + ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" +) + +const ( + // AutoUpdateAnnotationKey is the name of an annotation that enables + // automatic update of the spec of the bootstrap configuration + // object(s), if set to 'true'. + // + // On a fresh install, all bootstrap configuration objects will have auto + // update enabled with the following annotation key: + // apf.kubernetes.io/autoupdate-spec: 'true' + // + // The kube-apiserver periodically checks the bootstrap configuration + // objects on the cluster and applies updates if necessary. + // + // kube-apiserver enforces an 'always auto-update' policy for the + // mandatory configuration object(s). This implies: + // - the auto-update annotation key is added with a value of 'true' + // if it is missing. + // - the auto-update annotation key is set to 'true' if its current value + // is a boolean false or has an invalid boolean representation + // (if the cluster operator sets it to 'false' it will be stomped) + // - any changes to the spec made by the cluster operator will be + // stomped, except for changes to the `nominalConcurrencyShares` + // and `lendablePercent` fields of the PriorityLevelConfiguration + // named "exempt". + // + // The kube-apiserver will apply updates on the suggested configuration if: + // - the cluster operator has enabled auto-update by setting the annotation + // (apf.kubernetes.io/autoupdate-spec: 'true') or + // - the annotation key is missing but the generation is 1 + // + // If the suggested configuration object is missing the annotation key, + // kube-apiserver will update the annotation appropriately: + // - it is set to 'true' if generation of the object is '1' which usually + // indicates that the spec of the object has not been changed. + // - it is set to 'false' if generation of the object is greater than 1. + // + // The goal is to enable the kube-apiserver to apply update on suggested + // configuration objects installed by previous releases but not overwrite + // changes made by the cluster operators. + // Note that this distinction is imperfectly detected: in the case where an + // operator deletes a suggested configuration object and later creates it + // but with a variant spec and then does no updates of the object + // (generation is 1), the technique outlined above will incorrectly + // determine that the object should be auto-updated. + AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" +) + +const ( + // This annotation is only for use in v1beta3. + // + // The presence of this annotation in a v1beta3 object means that + // a zero value in the 'NominalConcurrencyShares' field means zero + // rather than the old default of 30. 
+ // + // To set a zero value for the 'NominalConcurrencyShares' field in v1beta3, + // set the annotation to an empty string: + // "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares": "" + // + PriorityLevelPreserveZeroConcurrencySharesKey = "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchema + +// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with +// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". +type FlowSchema struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec FlowSchemaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a FlowSchema. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status FlowSchemaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchemaList + +// FlowSchemaList is a list of FlowSchema objects. +type FlowSchemaList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // `items` is a list of FlowSchemas. + Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FlowSchemaSpec describes how the FlowSchema's specification looks like. +type FlowSchemaSpec struct { + // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot + // be resolved, the FlowSchema will be ignored and marked as invalid in its status. + // Required. + PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"` + // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen + // FlowSchema is among those with the numerically lowest (which we take to be logically highest) + // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. + // Note that if the precedence is not specified, it will be set to 1000 as default. + // +optional + MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"` + // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. 
+ // `nil` specifies that the distinguisher is disabled and thus will always be the empty string. + // +optional + DistinguisherMethod *FlowDistinguisherMethod `json:"distinguisherMethod,omitempty" protobuf:"bytes,3,opt,name=distinguisherMethod"` + // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if + // at least one member of rules matches the request. + // if it is an empty slice, there will be no requests matching the FlowSchema. + // +listType=atomic + // +optional + Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"` +} + +// FlowDistinguisherMethodType is the type of flow distinguisher method +type FlowDistinguisherMethodType string + +// These are valid flow-distinguisher methods. +const ( + // FlowDistinguisherMethodByUserType specifies that the flow distinguisher is the username in the request. + // This type is used to provide some insulation between users. + FlowDistinguisherMethodByUserType FlowDistinguisherMethodType = "ByUser" + + // FlowDistinguisherMethodByNamespaceType specifies that the flow distinguisher is the namespace of the + // object that the request acts upon. If the object is not namespaced, or if the request is a non-resource + // request, then the distinguisher will be the empty string. An example usage of this type is to provide + // some insulation between tenants in a situation where there are multiple tenants and each namespace + // is dedicated to a tenant. + FlowDistinguisherMethodByNamespaceType FlowDistinguisherMethodType = "ByNamespace" +) + +// FlowDistinguisherMethod specifies the method of a flow distinguisher. +type FlowDistinguisherMethod struct { + // `type` is the type of flow distinguisher method + // The supported types are "ByUser" and "ByNamespace". + // Required. + Type FlowDistinguisherMethodType `json:"type" protobuf:"bytes,1,opt,name=type"` +} + +// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used. +type PriorityLevelConfigurationReference struct { + // `name` is the name of the priority level configuration being referenced + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject +// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches +// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member +// of resourceRules or nonResourceRules matches the request. +type PolicyRulesWithSubjects struct { + // subjects is the list of normal user, serviceaccount, or group that this rule cares about. + // There must be at least one member in this slice. + // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. + // +listType=atomic + // Required. + Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"` + // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the + // target resource. + // At least one of `resourceRules` and `nonResourceRules` has to be non-empty. 
+ // +listType=atomic + // +optional + ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"` + // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb + // and the target non-resource URL. + // +listType=atomic + // +optional + NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"` +} + +// Subject matches the originator of a request, as identified by the request authentication system. There are three +// ways of matching an originator: by user, group, or service account. +// +union +type Subject struct { + // `kind` indicates which one of the other fields is non-empty. + // Required + // +unionDiscriminator + Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // `user` matches based on username. + // +optional + User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // `group` matches based on user group name. + // +optional + Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // `serviceAccount` matches ServiceAccounts. + // +optional + ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"` +} + +// SubjectKind is the kind of subject. +type SubjectKind string + +// Supported subject kinds. +const ( + SubjectKindUser SubjectKind = "User" + SubjectKindGroup SubjectKind = "Group" + SubjectKindServiceAccount SubjectKind = "ServiceAccount" +) + +// UserSubject holds detailed information for user-kind subject. +type UserSubject struct { + // `name` is the username that matches, or "*" to match all usernames. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// GroupSubject holds detailed information for group-kind subject. +type GroupSubject struct { + // name is the user group that matches, or "*" to match all user groups. + // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some + // well-known group names. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// ServiceAccountSubject holds detailed information for service-account-kind subject. +type ServiceAccountSubject struct { + // `namespace` is the namespace of matching ServiceAccount objects. + // Required. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name. + // Required. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// ResourcePolicyRule is a predicate that matches some resource +// requests, testing the request's verb and the target resource. A +// ResourcePolicyRule matches a resource request if and only if: (a) +// at least one member of verbs matches the request, (b) at least one +// member of apiGroups matches the request, (c) at least one member of +// resources matches the request, and (d) either (d1) the request does +// not specify a namespace (i.e., `Namespace==""`) and clusterScope is +// true or (d2) the request specifies a namespace and at least one member +// of namespaces matches the request's namespace. +type ResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs and, if present, must be the only entry. + // +listType=set + // Required.
+ Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // `apiGroups` is a list of matching API groups and may not be empty. + // "*" matches all API groups and, if present, must be the only entry. + // +listType=set + // Required. + APIGroups []string `json:"apiGroups" protobuf:"bytes,2,rep,name=apiGroups"` + + // `resources` is a list of matching resources (i.e., lowercase + // and plural) with, if desired, subresource. For example, [ + // "services", "nodes/status" ]. This list may not be empty. + // "*" matches all resources and, if present, must be the only entry. + // Required. + // +listType=set + Resources []string `json:"resources" protobuf:"bytes,3,rep,name=resources"` + + // `clusterScope` indicates whether to match requests that do not + // specify a namespace (which happens either because the resource + // is not namespaced or the request targets all namespaces). + // If this field is omitted or false then the `namespaces` field + // must contain a non-empty list. + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,4,opt,name=clusterScope"` + + // `namespaces` is a list of target namespaces that restricts + // matches. A request that specifies a target namespace matches + // only if either (a) this list contains that target namespace or + // (b) this list contains "*". Note that "*" matches any + // specified namespace but does not match a request that _does + // not specify_ a namespace (see the `clusterScope` field for + // that). + // This list may be empty, but only if `clusterScope` is true. + // +optional + // +listType=set + Namespaces []string `json:"namespaces" protobuf:"bytes,5,rep,name=namespaces"` +} + +// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the +// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member +// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request. +type NonResourcePolicyRule struct { + // `verbs` is a list of matching verbs and may not be empty. + // "*" matches all verbs. If it is present, it must be the only entry. + // +listType=set + // Required. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. + // For example: + // - "/healthz" is legal + // - "/hea*" is illegal + // - "/hea" is legal but matches nothing + // - "/hea/*" also matches nothing + // - "/healthz/*" matches all per-component health checks. + // "*" matches all non-resource urls. if it is present, it must be the only entry. + // +listType=set + // Required. + NonResourceURLs []string `json:"nonResourceURLs" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// FlowSchemaStatus represents the current state of a FlowSchema. +type FlowSchemaStatus struct { + // `conditions` is a list of the current states of FlowSchema. + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []FlowSchemaCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// FlowSchemaCondition describes conditions for a FlowSchema. +type FlowSchemaCondition struct { + // `type` is the type of the condition. + // Required. + Type FlowSchemaConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. 
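(Illustrative aside, not part of the vendored diff.) The types above compose into a complete FlowSchema as follows; this is a minimal Go sketch assuming the vendored import path k8s.io/api/flowcontrol/v1beta3, with a made-up object name, precedence, and priority level reference ("workload-low"):

```go
package main

import (
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical FlowSchema: send list/watch traffic on ConfigMaps from any
	// authenticated user to an assumed "workload-low" priority level, keeping
	// each user's requests in its own flow via the ByUser distinguisher.
	fs := flowcontrol.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "example-flow-schema"},
		Spec: flowcontrol.FlowSchemaSpec{
			PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{Name: "workload-low"},
			MatchingPrecedence:         500, // lower wins among matching FlowSchemas; must be in [1,10000]
			DistinguisherMethod: &flowcontrol.FlowDistinguisherMethod{
				Type: flowcontrol.FlowDistinguisherMethodByUserType,
			},
			Rules: []flowcontrol.PolicyRulesWithSubjects{{
				Subjects: []flowcontrol.Subject{{
					Kind:  flowcontrol.SubjectKindGroup,
					Group: &flowcontrol.GroupSubject{Name: "system:authenticated"},
				}},
				ResourceRules: []flowcontrol.ResourcePolicyRule{{
					Verbs:      []string{"list", "watch"},
					APIGroups:  []string{""},
					Resources:  []string{"configmaps"},
					Namespaces: []string{flowcontrol.NamespaceEvery},
				}},
			}},
		},
	}
	fmt.Println(fs.Name, "->", fs.Spec.PriorityLevelConfiguration.Name)
}
```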
+ // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// FlowSchemaConditionType is a valid value for FlowSchemaStatusCondition.Type +type FlowSchemaConditionType string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfiguration + +// PriorityLevelConfiguration represents the configuration of a priority level. +type PriorityLevelConfiguration struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `spec` is the specification of the desired behavior of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PriorityLevelConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // `status` is the current status of a "request-priority". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PriorityLevelConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfigurationList + +// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. +type PriorityLevelConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // `metadata` is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // `items` is a list of request-priorities. + Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PriorityLevelConfigurationSpec specifies the configuration of a priority level. +// +union +type PriorityLevelConfigurationSpec struct { + // `type` indicates whether this priority level is subject to + // limitation on request execution. A value of `"Exempt"` means + // that requests of this priority level are not subject to a limit + // (and thus are never queued) and do not detract from the + // capacity made available to other priority levels. 
A value of + // `"Limited"` means that (a) requests of this priority level + // _are_ subject to limits and (b) some of the server's limited + // capacity is made available exclusively to this priority level. + // Required. + // +unionDiscriminator + Type PriorityLevelEnablement `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `limited` specifies how requests are handled for a Limited priority level. + // This field must be non-empty if and only if `type` is `"Limited"`. + // +optional + Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"` + + // `exempt` specifies how requests are handled for an exempt priority level. + // This field MUST be empty if `type` is `"Limited"`. + // This field MAY be non-empty if `type` is `"Exempt"`. + // If empty and `type` is `"Exempt"` then the default values + // for `ExemptPriorityLevelConfiguration` apply. + // +optional + Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"` +} + +// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level +type PriorityLevelEnablement string + +// Supported priority level enablement values. +const ( + // PriorityLevelEnablementExempt means that requests are not subject to limits + PriorityLevelEnablementExempt PriorityLevelEnablement = "Exempt" + + // PriorityLevelEnablementLimited means that requests are subject to limits + PriorityLevelEnablementLimited PriorityLevelEnablement = "Limited" +) + +// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. +// It addresses two issues: +// - How are requests for this priority level limited? +// - What should be done with requests that exceed the limit? +type LimitedPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats available at this priority level. + // This is used both for requests dispatched from this priority level + // as well as requests dispatched from other priority levels + // borrowing seats from this level. + // The server's concurrency limit (ServerCL) is divided among the + // Limited priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of 30. + // +optional + NominalConcurrencyShares int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + + // `limitResponse` indicates what to do with requests that can not be executed right now + LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` + + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. The value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. 
+ // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,3,opt,name=lendablePercent"` + + // `borrowingLimitPercent`, if present, configures a limit on how many + // seats this priority level can borrow from other priority levels. + // The limit is known as this level's BorrowingConcurrencyLimit + // (BorrowingCL) and is a limit on the total number of seats that this + // level may borrow at any one time. + // This field holds the ratio of that limit to the level's nominal + // concurrency limit. When this field is non-nil, it must hold a + // non-negative integer and the limit is calculated as follows. + // + // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ) + // + // The value of this field can be more than 100, implying that this + // priority level can borrow a number of seats that is greater than + // its own nominal concurrency limit (NominalCL). + // When this field is left `nil`, the limit is effectively infinite. + // +optional + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"` +} + +// ExemptPriorityLevelConfiguration describes the configurable aspects +// of the handling of exempt requests. +// In the mandatory exempt configuration object the values in the fields +// here can be modified by authorized users, unlike the rest of the `spec`. +type ExemptPriorityLevelConfiguration struct { + // `nominalConcurrencyShares` (NCS) contributes to the computation of the + // NominalConcurrencyLimit (NominalCL) of this level. + // This is the number of execution seats nominally reserved for this priority level. + // This DOES NOT limit the dispatching from this priority level + // but affects the other priority levels through the borrowing mechanism. + // The server's concurrency limit (ServerCL) is divided among all the + // priority levels in proportion to their NCS values: + // + // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) + // sum_ncs = sum[priority level k] NCS(k) + // + // Bigger numbers mean a larger nominal concurrency limit, + // at the expense of every other priority level. + // This field has a default value of zero. + // +optional + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + // `lendablePercent` prescribes the fraction of the level's NominalCL that + // can be borrowed by other priority levels. This value of this + // field must be between 0 and 100, inclusive, and it defaults to 0. + // The number of seats that other levels can borrow from this level, known + // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows. + // + // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ) + // + // +optional + LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"` + // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`. + // In other words, an exempt priority level + // has no meaningful limit on how much it borrows. + // There is no explicit representation of that here. +} + +// LimitResponse defines how to handle requests that can not be executed right now. +// +union +type LimitResponse struct { + // `type` is "Queue" or "Reject". 
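(Illustrative aside, not part of the vendored diff.) The nominal, lendable, and borrowing limits above are plain arithmetic over the configured shares and percents. A self-contained Go sketch of the documented formulas, using made-up level names and an assumed server-wide concurrency limit:

```go
package main

import (
	"fmt"
	"math"
)

// level mirrors the tunables documented above for one Limited priority level.
type level struct {
	name                  string
	nominalShares         int32  // `nominalConcurrencyShares` (NCS); defaults to 30
	lendablePercent       int32  // `lendablePercent`, between 0 and 100
	borrowingLimitPercent *int32 // `borrowingLimitPercent`; nil means no borrowing limit
}

func main() {
	serverCL := 600.0 // the server's overall concurrency limit (illustrative value)
	hundred := int32(100)
	levels := []level{
		{name: "workload-low", nominalShares: 100, lendablePercent: 90},
		{name: "workload-high", nominalShares: 40, lendablePercent: 50, borrowingLimitPercent: &hundred},
	}

	var sumNCS int32
	for _, l := range levels {
		sumNCS += l.nominalShares
	}

	for _, l := range levels {
		// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
		nominalCL := math.Ceil(serverCL * float64(l.nominalShares) / float64(sumNCS))
		// LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
		lendableCL := math.Round(nominalCL * float64(l.lendablePercent) / 100.0)
		// BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ); unlimited when nil
		borrowingCL := math.Inf(1)
		if l.borrowingLimitPercent != nil {
			borrowingCL = math.Round(nominalCL * float64(*l.borrowingLimitPercent) / 100.0)
		}
		fmt.Printf("%s: NominalCL=%.0f LendableCL=%.0f BorrowingCL=%v\n", l.name, nominalCL, lendableCL, borrowingCL)
	}
}
```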
+ // "Queue" means that requests that can not be executed upon arrival + // are held in a queue until they can be executed or a queuing limit + // is reached. + // "Reject" means that requests that can not be executed upon arrival + // are rejected. + // Required. + // +unionDiscriminator + Type LimitResponseType `json:"type" protobuf:"bytes,1,opt,name=type"` + + // `queuing` holds the configuration parameters for queuing. + // This field may be non-empty only if `type` is `"Queue"`. + // +optional + Queuing *QueuingConfiguration `json:"queuing,omitempty" protobuf:"bytes,2,opt,name=queuing"` +} + +// LimitResponseType identifies how a Limited priority level handles a request that can not be executed right now +type LimitResponseType string + +// Supported limit responses. +const ( + // LimitResponseTypeQueue means that requests that can not be executed right now are queued until they can be executed or a queuing limit is hit + LimitResponseTypeQueue LimitResponseType = "Queue" + + // LimitResponseTypeReject means that requests that can not be executed right now are rejected + LimitResponseTypeReject LimitResponseType = "Reject" +) + +// QueuingConfiguration holds the configuration parameters for queuing +type QueuingConfiguration struct { + // `queues` is the number of queues for this priority level. The + // queues exist independently at each apiserver. The value must be + // positive. Setting it to 1 effectively precludes + // shufflesharding and thus makes the distinguisher method of + // associated flow schemas irrelevant. This field has a default + // value of 64. + // +optional + Queues int32 `json:"queues" protobuf:"varint,1,opt,name=queues"` + + // `handSize` is a small positive number that configures the + // shuffle sharding of requests into queues. When enqueuing a request + // at this priority level the request's flow identifier (a string + // pair) is hashed and the hash value is used to shuffle the list + // of queues and deal a hand of the size specified here. The + // request is put into one of the shortest queues in that hand. + // `handSize` must be no larger than `queues`, and should be + // significantly smaller (so that a few heavy flows do not + // saturate most of the queues). See the user-facing + // documentation for more extensive guidance on setting this + // field. This field has a default value of 8. + // +optional + HandSize int32 `json:"handSize" protobuf:"varint,2,opt,name=handSize"` + + // `queueLengthLimit` is the maximum number of requests allowed to + // be waiting in a given queue of this priority level at a time; + // excess requests are rejected. This value must be positive. If + // not specified, it will be defaulted to 50. + // +optional + QueueLengthLimit int32 `json:"queueLengthLimit" protobuf:"varint,3,opt,name=queueLengthLimit"` +} + +// PriorityLevelConfigurationConditionType is a valid value for PriorityLevelConfigurationStatusCondition.Type +type PriorityLevelConfigurationConditionType string + +// PriorityLevelConfigurationStatus represents the current state of a "request-priority". +type PriorityLevelConfigurationStatus struct { + // `conditions` is the current state of "request-priority". + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// PriorityLevelConfigurationCondition defines the condition of priority level. 
+type PriorityLevelConfigurationCondition struct { + // `type` is the type of the condition. + // Required. + Type PriorityLevelConfigurationConditionType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + // `status` is the status of the condition. + // Can be True, False, Unknown. + // Required. + Status ConditionStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // `lastTransitionTime` is the last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // `message` is a human-readable message indicating details about last transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ConditionStatus is the status of the condition. +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go similarity index 81% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go index aebb7f64c..fa76112a7 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1beta3 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -24,9 +24,19 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ExemptPriorityLevelConfiguration = map[string]string{ + "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. 
The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", +} + +func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string { + return map_ExemptPriorityLevelConfiguration +} + var map_FlowDistinguisherMethod = map[string]string{ "": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", "type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", @@ -112,8 +122,10 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30.", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", + "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. 
The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", } func (LimitedPriorityLevelConfiguration) SwaggerDoc() map[string]string { @@ -188,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{ "": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", "type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", "limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", } func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go similarity index 92% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go index 7f73f4606..09fefa20a 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go @@ -19,12 +19,38 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1alpha1 +package v1beta3 import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) { + *out = *in + if in.NominalConcurrencyShares != nil { + in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares + *out = new(int32) + **out = **in + } + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration. 
+func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration { + if in == nil { + return nil + } + out := new(ExemptPriorityLevelConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) { *out = *in @@ -212,6 +238,16 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in in.LimitResponse.DeepCopyInto(&out.LimitResponse) + if in.LendablePercent != nil { + in, out := &in.LendablePercent, &out.LendablePercent + *out = new(int32) + **out = **in + } + if in.BorrowingLimitPercent != nil { + in, out := &in.BorrowingLimitPercent, &out.BorrowingLimitPercent + *out = new(int32) + **out = **in + } return } @@ -390,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu *out = new(LimitedPriorityLevelConfiguration) (*in).DeepCopyInto(*out) } + if in.Exempt != nil { + in, out := &in.Exempt, &out.Exempt + *out = new(ExemptPriorityLevelConfiguration) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go similarity index 94% rename from vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go rename to vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go index 226014160..7e46a1469 100644 --- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by prerelease-lifecycle-gen. DO NOT EDIT. -package v1alpha1 +package v1beta3 import ( schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -28,95 +28,95 @@ import ( // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { - return 1, 18 + return 1, 26 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { - return 1, 20 + return 1, 29 } // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { - return 1, 21 + return 1, 32 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { - return 1, 18 + return 1, 26 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { - return 1, 20 + return 1, 29 } // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchemaList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchemaList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { - return 1, 21 + return 1, 32 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { - return 1, 18 + return 1, 26 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { - return 1, 20 + return 1, 29 } // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { - return 1, 21 + return 1, 32 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { - return 1, 18 + return 1, 26 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { - return 1, 20 + return 1, 29 } // APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. // It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfigurationList"} + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfigurationList"} } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { - return 1, 21 + return 1, 32 } diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index d3ffd5ed1..1d13e7bab 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=networking.k8s.io package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 719861b96..7c023e690 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto +// source: k8s.io/api/networking/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{0} + return fileDescriptor_2c41434372fec1d7, []int{0} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{1} + return fileDescriptor_2c41434372fec1d7, []int{1} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{2} + return fileDescriptor_2c41434372fec1d7, []int{2} } func (m *IPBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_IPBlock proto.InternalMessageInfo func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{3} + return fileDescriptor_2c41434372fec1d7, []int{3} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{4} + return fileDescriptor_2c41434372fec1d7, []int{4} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressClass) Reset() { *m = IngressClass{} } func (*IngressClass) ProtoMessage() {} func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{5} + return fileDescriptor_2c41434372fec1d7, []int{5} } func (m *IngressClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo func (m *IngressClassList) Reset() { *m = IngressClassList{} } func (*IngressClassList) ProtoMessage() {} func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{6} + return fileDescriptor_2c41434372fec1d7, []int{6} } func (m *IngressClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} } func (*IngressClassParametersReference) ProtoMessage() {} func (*IngressClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{7} + return fileDescriptor_2c41434372fec1d7, []int{7} } func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 
+275,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } func (*IngressClassSpec) ProtoMessage() {} func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{8} + return fileDescriptor_2c41434372fec1d7, []int{8} } func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{9} + return fileDescriptor_2c41434372fec1d7, []int{9} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,10 +328,94 @@ func (m *IngressList) XXX_DiscardUnknown() { var xxx_messageInfo_IngressList proto.InternalMessageInfo +func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } +func (*IngressLoadBalancerIngress) ProtoMessage() {} +func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{10} +} +func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src) +} +func (m *IngressLoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo + +func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } +func (*IngressLoadBalancerStatus) ProtoMessage() {} +func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{11} +} +func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src) +} +func (m *IngressLoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo + +func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } +func (*IngressPortStatus) ProtoMessage() {} +func (*IngressPortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2c41434372fec1d7, []int{12} +} +func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressPortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressPortStatus.Merge(m, src) +} 
+func (m *IngressPortStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressPortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressPortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo + func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{10} + return fileDescriptor_2c41434372fec1d7, []int{13} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +443,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{11} + return fileDescriptor_2c41434372fec1d7, []int{14} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +471,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} } func (*IngressServiceBackend) ProtoMessage() {} func (*IngressServiceBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{12} + return fileDescriptor_2c41434372fec1d7, []int{15} } func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +499,7 @@ var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{13} + return fileDescriptor_2c41434372fec1d7, []int{16} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +527,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{14} + return fileDescriptor_2c41434372fec1d7, []int{17} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +555,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{15} + return fileDescriptor_2c41434372fec1d7, []int{18} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +583,7 @@ var xxx_messageInfo_IngressTLS proto.InternalMessageInfo func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{16} + return fileDescriptor_2c41434372fec1d7, []int{19} } func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +611,7 @@ var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{17} + return fileDescriptor_2c41434372fec1d7, []int{20} } func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-555,7 +639,7 @@ var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{18} + return fileDescriptor_2c41434372fec1d7, []int{21} } func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +667,7 @@ var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{19} + return fileDescriptor_2c41434372fec1d7, []int{22} } func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +695,7 @@ var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{20} + return fileDescriptor_2c41434372fec1d7, []int{23} } func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +723,7 @@ var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{21} + return fileDescriptor_2c41434372fec1d7, []int{24} } func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +751,7 @@ var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{22} + return fileDescriptor_2c41434372fec1d7, []int{25} } func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -695,7 +779,7 @@ var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} } func (*ServiceBackendPort) ProtoMessage() {} func (*ServiceBackendPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{23} + return fileDescriptor_2c41434372fec1d7, []int{26} } func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -731,6 +815,9 @@ func init() { proto.RegisterType((*IngressClassParametersReference)(nil), "k8s.io.api.networking.v1.IngressClassParametersReference") proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1.IngressClassSpec") proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1.IngressList") + proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.networking.v1.IngressLoadBalancerIngress") + proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.networking.v1.IngressLoadBalancerStatus") + proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.networking.v1.IngressPortStatus") proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1.IngressRule") proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1.IngressRuleValue") proto.RegisterType((*IngressServiceBackend)(nil), 
"k8s.io.api.networking.v1.IngressServiceBackend") @@ -748,108 +835,115 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) -} - -var fileDescriptor_1c72867a70a7cc90 = []byte{ - // 1545 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x1b, 0xcf, 0x3a, 0x71, 0xec, 0x3c, 0x4e, 0xd2, 0x74, 0xde, 0x56, 0xaf, 0xd5, 0x57, 0xaf, 0x1d, - 0x56, 0xb4, 0x0d, 0x94, 0xda, 0x24, 0xad, 0x10, 0x9c, 0xa0, 0x9b, 0xb6, 0x69, 0x68, 0x9a, 0x58, - 0x63, 0xab, 0x08, 0x04, 0xa8, 0x93, 0xf5, 0xc4, 0xd9, 0x7a, 0xbd, 0xb3, 0xcc, 0x8e, 0x43, 0x7b, - 0xe3, 0xc2, 0x81, 0x1b, 0xff, 0x02, 0x7f, 0x02, 0x82, 0x1b, 0x82, 0xc2, 0x05, 0xf5, 0x58, 0x89, - 0x4b, 0x2f, 0x58, 0xd4, 0xfc, 0x17, 0x39, 0xa1, 0x99, 0x9d, 0xfd, 0xb0, 0x1d, 0x63, 0xab, 0xaa, - 0x72, 0x8a, 0xf7, 0xf9, 0xf8, 0x3d, 0x1f, 0xf3, 0x7c, 0xcc, 0x04, 0x6e, 0xb4, 0xdf, 0x0d, 0x2a, - 0x0e, 0xab, 0xb6, 0xbb, 0xfb, 0x94, 0x7b, 0x54, 0xd0, 0xa0, 0x7a, 0x44, 0xbd, 0x26, 0xe3, 0x55, - 0xcd, 0x20, 0xbe, 0x53, 0xf5, 0xa8, 0xf8, 0x92, 0xf1, 0xb6, 0xe3, 0xb5, 0xaa, 0x47, 0xeb, 0xd5, - 0x16, 0xf5, 0x28, 0x27, 0x82, 0x36, 0x2b, 0x3e, 0x67, 0x82, 0xa1, 0x62, 0x28, 0x59, 0x21, 0xbe, - 0x53, 0x49, 0x24, 0x2b, 0x47, 0xeb, 0x17, 0xae, 0xb6, 0x1c, 0x71, 0xd8, 0xdd, 0xaf, 0xd8, 0xac, - 0x53, 0x6d, 0xb1, 0x16, 0xab, 0x2a, 0x85, 0xfd, 0xee, 0x81, 0xfa, 0x52, 0x1f, 0xea, 0x57, 0x08, - 0x74, 0xc1, 0x4c, 0x99, 0xb4, 0x19, 0xa7, 0x27, 0x18, 0xbb, 0x70, 0x3d, 0x91, 0xe9, 0x10, 0xfb, - 0xd0, 0xf1, 0x28, 0x7f, 0x5c, 0xf5, 0xdb, 0x2d, 0x49, 0x08, 0xaa, 0x1d, 0x2a, 0xc8, 0x49, 0x5a, - 0xd5, 0x71, 0x5a, 0xbc, 0xeb, 0x09, 0xa7, 0x43, 0x47, 0x14, 0xde, 0x99, 0xa4, 0x10, 0xd8, 0x87, - 0xb4, 0x43, 0x46, 0xf4, 0xae, 0x8d, 0xd3, 0xeb, 0x0a, 0xc7, 0xad, 0x3a, 0x9e, 0x08, 0x04, 0x1f, - 0x56, 0x32, 0x7f, 0x31, 0xe0, 0xcc, 0x9d, 0x46, 0xa3, 0xb6, 0xed, 0xb5, 0x38, 0x0d, 0x82, 0x1a, - 0x11, 0x87, 0x68, 0x15, 0xe6, 0x7c, 0x22, 0x0e, 0x8b, 0xc6, 0xaa, 0xb1, 0xb6, 0x60, 0x2d, 0x3e, - 0xed, 0x95, 0x67, 0xfa, 0xbd, 0xf2, 0x9c, 0xe4, 0x61, 0xc5, 0x41, 0xd7, 0x21, 0x2f, 0xff, 0x36, - 0x1e, 0xfb, 0xb4, 0x38, 0xab, 0xa4, 0x8a, 0xfd, 0x5e, 0x39, 0x5f, 0xd3, 0xb4, 0xe3, 0xd4, 0x6f, - 0x1c, 0x4b, 0xa2, 0x3a, 0xe4, 0xf6, 0x89, 0xdd, 0xa6, 0x5e, 0xb3, 0x98, 0x59, 0x35, 0xd6, 0x0a, - 0x1b, 0x6b, 0x95, 0x71, 0xc7, 0x57, 0xd1, 0xfe, 0x58, 0xa1, 0xbc, 0x75, 0x46, 0x3b, 0x91, 0xd3, - 0x04, 0x1c, 0x21, 0x99, 0x07, 0x70, 0x2e, 0xe5, 0x3f, 0xee, 0xba, 0xf4, 0x3e, 0x71, 0xbb, 0x14, - 0xed, 0x42, 0x56, 0x1a, 0x0e, 0x8a, 0xc6, 0xea, 0xec, 0x5a, 0x61, 0xe3, 0x8d, 0xf1, 0xa6, 0x86, - 0xc2, 0xb7, 0x96, 0xb4, 0xad, 0xac, 0xfc, 0x0a, 0x70, 0x08, 0x63, 0xee, 0x41, 0x6e, 0xbb, 0x66, - 0xb9, 0xcc, 0x6e, 0xcb, 0xfc, 0xd8, 0x4e, 0x93, 0x0f, 0xe7, 0x67, 0x73, 0xfb, 0x26, 0xc6, 0x8a, - 0x83, 0x4c, 0x98, 0xa7, 0x8f, 0x6c, 0xea, 0x8b, 0x62, 0x66, 0x75, 0x76, 0x6d, 0xc1, 0x82, 0x7e, - 0xaf, 0x3c, 0x7f, 0x4b, 0x51, 0xb0, 0xe6, 0x98, 0x5f, 0x67, 0x20, 0xa7, 0xcd, 0xa2, 0x07, 0x90, - 0x97, 0xe5, 0xd3, 0x24, 0x82, 0x28, 0xd4, 0xc2, 0xc6, 0xdb, 0x29, 0x7f, 0xe3, 0xd3, 0xac, 0xf8, - 0xed, 0x96, 0x24, 0x04, 0x15, 0x29, 0x2d, 0x7d, 0xdf, 0xdb, 0x7f, 0x48, 0x6d, 0x71, 0x8f, 0x0a, - 0x62, 0x21, 0xed, 0x07, 0x24, 0x34, 0x1c, 0xa3, 0xa2, 0x2d, 0x98, 0x0b, 0x7c, 0x6a, 0xeb, 0xc4, - 0x5f, 0x9c, 0x98, 0xf8, 0xba, 0x4f, 0xed, 0x24, 0x34, 0xf9, 0x85, 0x15, 0x00, 0xda, 0x83, 0xf9, - 0x40, 0x10, 0xd1, 0x0d, 0xd4, 0xc1, 0x17, 0x36, 0x2e, 0x4f, 0x86, 0x52, 0xe2, 0xd6, 
0xb2, 0x06, - 0x9b, 0x0f, 0xbf, 0xb1, 0x86, 0x31, 0x7f, 0x33, 0x60, 0x79, 0xf0, 0xb4, 0xd1, 0x7d, 0xc8, 0x05, - 0x94, 0x1f, 0x39, 0x36, 0x2d, 0xce, 0x29, 0x23, 0xd5, 0xc9, 0x46, 0x42, 0xf9, 0xa8, 0x5e, 0x0a, - 0xb2, 0x56, 0x34, 0x0d, 0x47, 0x60, 0xe8, 0x23, 0xc8, 0x73, 0x1a, 0xb0, 0x2e, 0xb7, 0xa9, 0xf6, - 0xfe, 0x6a, 0x1a, 0x58, 0xf6, 0xbd, 0x84, 0x94, 0xc5, 0xda, 0xdc, 0x61, 0x36, 0x71, 0xc3, 0x54, - 0x62, 0x7a, 0x40, 0x39, 0xf5, 0x6c, 0x6a, 0x2d, 0xca, 0x2a, 0xc7, 0x1a, 0x02, 0xc7, 0x60, 0xb2, - 0x8b, 0x16, 0xb5, 0x23, 0x9b, 0x2e, 0x39, 0x95, 0x03, 0xdd, 0x19, 0x38, 0xd0, 0x37, 0x27, 0x26, - 0x48, 0xf9, 0x35, 0xee, 0x54, 0xcd, 0x9f, 0x0d, 0x58, 0x49, 0x0b, 0xee, 0x38, 0x81, 0x40, 0x9f, - 0x8e, 0x04, 0x51, 0x99, 0x2e, 0x08, 0xa9, 0xad, 0x42, 0x58, 0xd1, 0xa6, 0xf2, 0x11, 0x25, 0x15, - 0xc0, 0x5d, 0xc8, 0x3a, 0x82, 0x76, 0x02, 0xd5, 0x22, 0x85, 0x8d, 0x4b, 0xd3, 0x45, 0x90, 0x74, - 0xe7, 0xb6, 0x54, 0xc6, 0x21, 0x86, 0xf9, 0xa7, 0x01, 0xe5, 0xb4, 0x58, 0x8d, 0x70, 0xd2, 0xa1, - 0x82, 0xf2, 0x20, 0x3e, 0x3c, 0xb4, 0x06, 0x79, 0x52, 0xdb, 0xde, 0xe2, 0xac, 0xeb, 0x47, 0xad, - 0x2b, 0x5d, 0xbb, 0xa1, 0x69, 0x38, 0xe6, 0xca, 0x06, 0x6f, 0x3b, 0x7a, 0x4a, 0xa5, 0x1a, 0xfc, - 0xae, 0xe3, 0x35, 0xb1, 0xe2, 0x48, 0x09, 0x8f, 0x74, 0xa2, 0xe1, 0x17, 0x4b, 0xec, 0x92, 0x0e, - 0xc5, 0x8a, 0x83, 0xca, 0x90, 0x0d, 0x6c, 0xe6, 0x87, 0x15, 0xbc, 0x60, 0x2d, 0x48, 0x97, 0xeb, - 0x92, 0x80, 0x43, 0x3a, 0xba, 0x02, 0x0b, 0x52, 0x30, 0xf0, 0x89, 0x4d, 0x8b, 0x59, 0x25, 0xb4, - 0xd4, 0xef, 0x95, 0x17, 0x76, 0x23, 0x22, 0x4e, 0xf8, 0xe6, 0xf7, 0x43, 0xe7, 0x23, 0x8f, 0x0e, - 0x6d, 0x00, 0xd8, 0xcc, 0x13, 0x9c, 0xb9, 0x2e, 0x8d, 0xa6, 0x51, 0x5c, 0x34, 0x9b, 0x31, 0x07, - 0xa7, 0xa4, 0x90, 0x03, 0xe0, 0xc7, 0xb9, 0xd1, 0xc5, 0xf3, 0xde, 0x74, 0xa9, 0x3f, 0x21, 0xa7, - 0xd6, 0xb2, 0x34, 0x95, 0x62, 0xa4, 0xc0, 0xcd, 0x1f, 0x0c, 0x28, 0x68, 0xfd, 0x53, 0x28, 0xa7, - 0xdb, 0x83, 0xe5, 0xf4, 0xda, 0xe4, 0xd5, 0x72, 0x72, 0x25, 0x7d, 0x97, 0x78, 0x2d, 0x97, 0x89, - 0x3c, 0xe9, 0x43, 0x16, 0x88, 0xe1, 0x61, 0x7f, 0x87, 0x05, 0x02, 0x2b, 0x0e, 0xf2, 0x61, 0xc5, - 0x19, 0xda, 0x3e, 0x53, 0x77, 0x65, 0xac, 0x61, 0x15, 0x35, 0xf2, 0xca, 0x30, 0x07, 0x8f, 0xa0, - 0x9b, 0x0f, 0x60, 0x44, 0x4a, 0xce, 0x83, 0x43, 0x21, 0xfc, 0x13, 0x32, 0x3b, 0x7e, 0xdd, 0x25, - 0xd6, 0xf3, 0x2a, 0xa6, 0x46, 0xa3, 0x86, 0x15, 0x8a, 0xf9, 0x8d, 0x01, 0xe7, 0x4f, 0x9c, 0xac, - 0x71, 0xe5, 0x1b, 0x63, 0x2b, 0x7f, 0x17, 0xe6, 0x7c, 0xc6, 0x85, 0xce, 0xc1, 0x5b, 0xe3, 0x3d, - 0x19, 0x44, 0xae, 0x31, 0x2e, 0x52, 0x97, 0x0d, 0xc6, 0x05, 0x56, 0x38, 0xe6, 0xef, 0x99, 0xf8, - 0x44, 0x54, 0xd9, 0x7f, 0x10, 0xe7, 0x5b, 0x95, 0xa5, 0xb4, 0xac, 0x9b, 0xec, 0x5c, 0x2a, 0x7f, - 0x31, 0x0f, 0x8f, 0x48, 0xa3, 0x26, 0x2c, 0x37, 0xe9, 0x01, 0xe9, 0xba, 0x42, 0xdb, 0xd6, 0x59, - 0x9b, 0xfe, 0x3e, 0x82, 0xfa, 0xbd, 0xf2, 0xf2, 0xcd, 0x01, 0x0c, 0x3c, 0x84, 0x89, 0x36, 0x61, - 0x56, 0xb8, 0x51, 0x3d, 0xbe, 0x3e, 0x11, 0xba, 0xb1, 0x53, 0xb7, 0x0a, 0x3a, 0xfc, 0xd9, 0xc6, - 0x4e, 0x1d, 0x4b, 0x6d, 0xf4, 0x21, 0x64, 0x79, 0xd7, 0xa5, 0x72, 0xdb, 0xce, 0x4e, 0xb5, 0xb8, - 0xe5, 0x99, 0x26, 0xa5, 0x2d, 0xbf, 0x02, 0x1c, 0x42, 0x98, 0x5f, 0xc0, 0xd2, 0xc0, 0x4a, 0x46, - 0x0f, 0x60, 0xd1, 0x65, 0xa4, 0x69, 0x11, 0x97, 0x78, 0xb6, 0x1e, 0x21, 0x43, 0x93, 0x38, 0xda, - 0x89, 0x3b, 0x29, 0x39, 0xbd, 0xd0, 0xcf, 0x69, 0x23, 0x8b, 0x69, 0x1e, 0x1e, 0x40, 0x34, 0x09, - 0x40, 0x12, 0x9e, 0x9c, 0x89, 0xb2, 0x63, 0xc2, 0x3b, 0x99, 0x9e, 0x89, 0xb2, 0x91, 0x02, 0x1c, - 0xd2, 0xe5, 0x44, 0x0b, 0xa8, 0xcd, 0xa9, 0x50, 0x87, 0x9a, 0x19, 0x9c, 0x68, 0xf5, 0x98, 0x83, - 0x53, 0x52, 
0xe6, 0xaf, 0x06, 0x2c, 0xed, 0x86, 0x99, 0xa8, 0x31, 0xd7, 0xb1, 0x1f, 0x9f, 0xc2, - 0xf2, 0xbd, 0x37, 0xb0, 0x7c, 0xaf, 0x8c, 0x3f, 0x94, 0x01, 0xc7, 0xc6, 0x6e, 0xdf, 0x1f, 0x0d, - 0xf8, 0xef, 0x80, 0xe4, 0xad, 0x64, 0xfe, 0xd4, 0x20, 0x2b, 0xbb, 0x20, 0xba, 0xc7, 0x4e, 0x6b, - 0x4b, 0x75, 0x53, 0x72, 0x93, 0x95, 0x08, 0x38, 0x04, 0x42, 0x5b, 0x90, 0x11, 0x4c, 0x97, 0xe5, - 0xd4, 0x70, 0x94, 0x72, 0x0b, 0x34, 0x5c, 0xa6, 0xc1, 0x70, 0x46, 0x30, 0xf3, 0x27, 0x03, 0x8a, - 0x03, 0x52, 0xe9, 0xb9, 0xf9, 0xea, 0xfd, 0xbe, 0x07, 0x73, 0x07, 0x9c, 0x75, 0x5e, 0xc6, 0xf3, - 0x38, 0xe9, 0xb7, 0x39, 0xeb, 0x60, 0x05, 0x63, 0x3e, 0x31, 0xe0, 0xec, 0x80, 0xe4, 0x29, 0x2c, - 0xa9, 0x9d, 0xc1, 0x25, 0x75, 0x79, 0xca, 0x18, 0xc6, 0xac, 0xaa, 0x27, 0x99, 0xa1, 0x08, 0x64, - 0xac, 0xe8, 0x00, 0x0a, 0x3e, 0x6b, 0xd6, 0xa9, 0x4b, 0x6d, 0xc1, 0xa2, 0x9e, 0xbe, 0x36, 0x65, - 0x10, 0x64, 0x9f, 0xba, 0x91, 0xaa, 0x75, 0xa6, 0xdf, 0x2b, 0x17, 0x6a, 0x09, 0x16, 0x4e, 0x03, - 0xa3, 0x47, 0x70, 0x36, 0xbe, 0x9f, 0xc4, 0xd6, 0x32, 0x2f, 0x6f, 0xed, 0x7c, 0xbf, 0x57, 0x3e, - 0xbb, 0x3b, 0x8c, 0x88, 0x47, 0x8d, 0xa0, 0x3b, 0x90, 0x73, 0x7c, 0xf5, 0x14, 0xd3, 0xb7, 0xf8, - 0x7f, 0x5b, 0xf6, 0xe1, 0x9b, 0x2d, 0x7c, 0x10, 0xe8, 0x0f, 0x1c, 0xa9, 0x9b, 0x7f, 0x0c, 0xd7, - 0x80, 0x2c, 0x38, 0xb4, 0x05, 0x79, 0xf5, 0x38, 0xb6, 0x99, 0xab, 0xd7, 0xdc, 0x15, 0xf5, 0xba, - 0xd5, 0xb4, 0xe3, 0x5e, 0xf9, 0x7f, 0xa3, 0xff, 0x2d, 0xa8, 0x44, 0x6c, 0x1c, 0x2b, 0x0f, 0x6d, - 0xc2, 0xf1, 0x43, 0x48, 0x3e, 0xd0, 0x2b, 0xe1, 0x03, 0xbd, 0xb2, 0xed, 0x89, 0x3d, 0x5e, 0x17, - 0xdc, 0xf1, 0x5a, 0xe1, 0x56, 0x4e, 0x36, 0x21, 0xba, 0x08, 0x39, 0xbd, 0x28, 0x55, 0xe0, 0xd9, - 0x30, 0xaa, 0x5b, 0x21, 0x09, 0x47, 0x3c, 0xf3, 0x78, 0xb8, 0x2e, 0xd4, 0xda, 0x7c, 0xf8, 0xca, - 0xea, 0xe2, 0x3f, 0xba, 0x1a, 0xc7, 0xd7, 0xc6, 0x67, 0x90, 0xd3, 0x4b, 0x57, 0x57, 0xfa, 0xc6, - 0x94, 0x95, 0x9e, 0x5e, 0x62, 0xf1, 0x9b, 0x3f, 0x22, 0x46, 0x98, 0xe8, 0x63, 0x98, 0xa7, 0x21, - 0x7a, 0xb8, 0x15, 0xd7, 0xa7, 0x44, 0x4f, 0xc6, 0x6a, 0xf2, 0x1a, 0xd5, 0x34, 0x0d, 0x88, 0xde, - 0x97, 0x59, 0x92, 0xb2, 0xf2, 0x11, 0x18, 0x14, 0xe7, 0xd4, 0xa2, 0xfa, 0x7f, 0x18, 0x6c, 0x4c, - 0x3e, 0x96, 0x97, 0xde, 0xf8, 0x13, 0xa7, 0x35, 0xcc, 0xcf, 0x01, 0x8d, 0xde, 0x6b, 0xa6, 0xb8, - 0x35, 0x5d, 0x82, 0x79, 0xaf, 0xdb, 0xd9, 0xa7, 0x61, 0x0f, 0x65, 0x13, 0x07, 0x77, 0x15, 0x15, - 0x6b, 0xae, 0xb5, 0xf6, 0xf4, 0x45, 0x69, 0xe6, 0xd9, 0x8b, 0xd2, 0xcc, 0xf3, 0x17, 0xa5, 0x99, - 0xaf, 0xfa, 0x25, 0xe3, 0x69, 0xbf, 0x64, 0x3c, 0xeb, 0x97, 0x8c, 0xe7, 0xfd, 0x92, 0xf1, 0x57, - 0xbf, 0x64, 0x7c, 0xfb, 0x77, 0x69, 0xe6, 0x93, 0xcc, 0xd1, 0xfa, 0x3f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0xe9, 0x15, 0xcc, 0xab, 0x5f, 0x13, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/networking/v1/generated.proto", fileDescriptor_2c41434372fec1d7) +} + +var fileDescriptor_2c41434372fec1d7 = []byte{ + // 1652 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x55, + 0x14, 0xce, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x69, 0x85, 0x09, 0xc2, 0x0e, 0x23, 0xda, + 0x06, 0xda, 0xda, 0x34, 0xad, 0x10, 0x6c, 0x78, 0x4c, 0x9a, 0xa6, 0xa1, 0xa9, 0x63, 0x5d, 0x5b, + 0x45, 0x20, 0x1e, 0x9d, 0x8c, 0x6f, 0x9c, 0x69, 0xc6, 0x33, 0xa3, 0x3b, 0xd7, 0xa5, 0x95, 0x10, + 0x62, 0xc3, 0x82, 0x1d, 0x7f, 0x01, 0xf1, 0x0b, 0x10, 0x2c, 0x90, 0x10, 0x14, 0x36, 0xa8, 0xcb, + 0x4a, 0x6c, 0xba, 0xc1, 0xa2, 0xe6, 0x5f, 0x64, 0x85, 0xee, 0x63, 0x1e, 0x7e, 0xd5, 0xa6, 0xaa, + 0xb2, 0x4a, 0xee, 0x39, 0xe7, 0x7e, 0xe7, 0x71, 0xcf, 0x6b, 0x0c, 0x6b, 0x87, 
0x6f, 0x06, 0x25, + 0xdb, 0x2b, 0x9b, 0xbe, 0x5d, 0x76, 0x09, 0xfb, 0xdc, 0xa3, 0x87, 0xb6, 0xdb, 0x2c, 0xdf, 0xb9, + 0x58, 0x6e, 0x12, 0x97, 0x50, 0x93, 0x91, 0x46, 0xc9, 0xa7, 0x1e, 0xf3, 0x50, 0x5e, 0x4a, 0x96, + 0x4c, 0xdf, 0x2e, 0xc5, 0x92, 0xa5, 0x3b, 0x17, 0x57, 0x2e, 0x34, 0x6d, 0x76, 0xd0, 0xde, 0x2b, + 0x59, 0x5e, 0xab, 0xdc, 0xf4, 0x9a, 0x5e, 0x59, 0x5c, 0xd8, 0x6b, 0xef, 0x8b, 0x93, 0x38, 0x88, + 0xff, 0x24, 0xd0, 0x8a, 0x9e, 0x50, 0x69, 0x79, 0x94, 0x0c, 0x51, 0xb6, 0x72, 0x39, 0x96, 0x69, + 0x99, 0xd6, 0x81, 0xed, 0x12, 0x7a, 0xaf, 0xec, 0x1f, 0x36, 0x39, 0x21, 0x28, 0xb7, 0x08, 0x33, + 0x87, 0xdd, 0x2a, 0x8f, 0xba, 0x45, 0xdb, 0x2e, 0xb3, 0x5b, 0x64, 0xe0, 0xc2, 0x1b, 0xe3, 0x2e, + 0x04, 0xd6, 0x01, 0x69, 0x99, 0x03, 0xf7, 0x2e, 0x8d, 0xba, 0xd7, 0x66, 0xb6, 0x53, 0xb6, 0x5d, + 0x16, 0x30, 0xda, 0x7f, 0x49, 0xff, 0x4d, 0x83, 0x13, 0xd7, 0xea, 0xf5, 0xea, 0xb6, 0xdb, 0xa4, + 0x24, 0x08, 0xaa, 0x26, 0x3b, 0x40, 0xab, 0x30, 0xe3, 0x9b, 0xec, 0x20, 0xaf, 0xad, 0x6a, 0x6b, + 0x73, 0xc6, 0xfc, 0x83, 0x4e, 0x71, 0xaa, 0xdb, 0x29, 0xce, 0x70, 0x1e, 0x16, 0x1c, 0x74, 0x19, + 0xb2, 0xfc, 0x6f, 0xfd, 0x9e, 0x4f, 0xf2, 0xd3, 0x42, 0x2a, 0xdf, 0xed, 0x14, 0xb3, 0x55, 0x45, + 0x3b, 0x4a, 0xfc, 0x8f, 0x23, 0x49, 0x54, 0x83, 0xcc, 0x9e, 0x69, 0x1d, 0x12, 0xb7, 0x91, 0x4f, + 0xad, 0x6a, 0x6b, 0xb9, 0xf5, 0xb5, 0xd2, 0xa8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f, 0x9c, + 0x50, 0x46, 0x64, 0x14, 0x01, 0x87, 0x48, 0xfa, 0x3e, 0x9c, 0x4c, 0xd8, 0x8f, 0xdb, 0x0e, 0xb9, + 0x69, 0x3a, 0x6d, 0x82, 0x2a, 0x90, 0xe6, 0x8a, 0x83, 0xbc, 0xb6, 0x3a, 0xbd, 0x96, 0x5b, 0x7f, + 0x75, 0xb4, 0xaa, 0x3e, 0xf7, 0x8d, 0x05, 0xa5, 0x2b, 0xcd, 0x4f, 0x01, 0x96, 0x30, 0xfa, 0x2e, + 0x64, 0xb6, 0xab, 0x86, 0xe3, 0x59, 0x87, 0x3c, 0x3e, 0x96, 0xdd, 0xa0, 0xfd, 0xf1, 0xd9, 0xd8, + 0xbe, 0x82, 0xb1, 0xe0, 0x20, 0x1d, 0x66, 0xc9, 0x5d, 0x8b, 0xf8, 0x2c, 0x9f, 0x5a, 0x9d, 0x5e, + 0x9b, 0x33, 0xa0, 0xdb, 0x29, 0xce, 0x6e, 0x0a, 0x0a, 0x56, 0x1c, 0xfd, 0xeb, 0x14, 0x64, 0x94, + 0x5a, 0x74, 0x0b, 0xb2, 0x3c, 0x7d, 0x1a, 0x26, 0x33, 0x05, 0x6a, 0x6e, 0xfd, 0xf5, 0x84, 0xbd, + 0xd1, 0x6b, 0x96, 0xfc, 0xc3, 0x26, 0x27, 0x04, 0x25, 0x2e, 0xcd, 0x6d, 0xdf, 0xdd, 0xbb, 0x4d, + 0x2c, 0x76, 0x83, 0x30, 0xd3, 0x40, 0xca, 0x0e, 0x88, 0x69, 0x38, 0x42, 0x45, 0x5b, 0x30, 0x13, + 0xf8, 0xc4, 0x52, 0x81, 0x3f, 0x3d, 0x36, 0xf0, 0x35, 0x9f, 0x58, 0xb1, 0x6b, 0xfc, 0x84, 0x05, + 0x00, 0xda, 0x85, 0xd9, 0x80, 0x99, 0xac, 0x1d, 0x88, 0x87, 0xcf, 0xad, 0x9f, 0x1d, 0x0f, 0x25, + 0xc4, 0x8d, 0x45, 0x05, 0x36, 0x2b, 0xcf, 0x58, 0xc1, 0xe8, 0x7f, 0x68, 0xb0, 0xd8, 0xfb, 0xda, + 0xe8, 0x26, 0x64, 0x02, 0x42, 0xef, 0xd8, 0x16, 0xc9, 0xcf, 0x08, 0x25, 0xe5, 0xf1, 0x4a, 0xa4, + 0x7c, 0x98, 0x2f, 0x39, 0x9e, 0x2b, 0x8a, 0x86, 0x43, 0x30, 0xf4, 0x01, 0x64, 0x29, 0x09, 0xbc, + 0x36, 0xb5, 0x88, 0xb2, 0xfe, 0x42, 0x12, 0x98, 0xd7, 0x3d, 0x87, 0xe4, 0xc9, 0xda, 0xd8, 0xf1, + 0x2c, 0xd3, 0x91, 0xa1, 0xc4, 0x64, 0x9f, 0x50, 0xe2, 0x5a, 0xc4, 0x98, 0xe7, 0x59, 0x8e, 0x15, + 0x04, 0x8e, 0xc0, 0x78, 0x15, 0xcd, 0x2b, 0x43, 0x36, 0x1c, 0xf3, 0x58, 0x1e, 0x74, 0xa7, 0xe7, + 0x41, 0x5f, 0x1b, 0x1b, 0x20, 0x61, 0xd7, 0xa8, 0x57, 0xd5, 0x7f, 0xd5, 0x60, 0x29, 0x29, 0xb8, + 0x63, 0x07, 0x0c, 0x7d, 0x3c, 0xe0, 0x44, 0x69, 0x32, 0x27, 0xf8, 0x6d, 0xe1, 0xc2, 0x92, 0x52, + 0x95, 0x0d, 0x29, 0x09, 0x07, 0xae, 0x43, 0xda, 0x66, 0xa4, 0x15, 0x88, 0x12, 0xc9, 0xad, 0x9f, + 0x99, 0xcc, 0x83, 0xb8, 0x3a, 0xb7, 0xf9, 0x65, 0x2c, 0x31, 0xf4, 0xbf, 0x35, 0x28, 0x26, 0xc5, + 0xaa, 0x26, 0x35, 0x5b, 0x84, 0x11, 0x1a, 0x44, 0x8f, 0x87, 0xd6, 0x20, 0x6b, 0x56, 0xb7, 0xb7, + 0xa8, 
0xd7, 0xf6, 0xc3, 0xd2, 0xe5, 0xa6, 0xbd, 0xa7, 0x68, 0x38, 0xe2, 0xf2, 0x02, 0x3f, 0xb4, + 0x55, 0x97, 0x4a, 0x14, 0xf8, 0x75, 0xdb, 0x6d, 0x60, 0xc1, 0xe1, 0x12, 0xae, 0xd9, 0x0a, 0x9b, + 0x5f, 0x24, 0x51, 0x31, 0x5b, 0x04, 0x0b, 0x0e, 0x2a, 0x42, 0x3a, 0xb0, 0x3c, 0x5f, 0x66, 0xf0, + 0x9c, 0x31, 0xc7, 0x4d, 0xae, 0x71, 0x02, 0x96, 0x74, 0x74, 0x0e, 0xe6, 0xb8, 0x60, 0xe0, 0x9b, + 0x16, 0xc9, 0xa7, 0x85, 0xd0, 0x42, 0xb7, 0x53, 0x9c, 0xab, 0x84, 0x44, 0x1c, 0xf3, 0xf5, 0x1f, + 0xfa, 0xde, 0x87, 0x3f, 0x1d, 0x5a, 0x07, 0xb0, 0x3c, 0x97, 0x51, 0xcf, 0x71, 0x48, 0xd8, 0x8d, + 0xa2, 0xa4, 0xd9, 0x88, 0x38, 0x38, 0x21, 0x85, 0x6c, 0x00, 0x3f, 0x8a, 0x8d, 0x4a, 0x9e, 0xb7, + 0x26, 0x0b, 0xfd, 0x90, 0x98, 0x1a, 0x8b, 0x5c, 0x55, 0x82, 0x91, 0x00, 0xd7, 0x7f, 0xd4, 0x20, + 0xa7, 0xee, 0x1f, 0x43, 0x3a, 0x5d, 0xed, 0x4d, 0xa7, 0x97, 0xc7, 0x8f, 0x96, 0xe1, 0x99, 0xf4, + 0xb3, 0x06, 0x2b, 0xa1, 0xd5, 0x9e, 0xd9, 0x30, 0x4c, 0xc7, 0x74, 0x2d, 0x42, 0xc3, 0x4e, 0xbd, + 0x02, 0x29, 0x3b, 0x4c, 0x1f, 0x50, 0x00, 0xa9, 0xed, 0x2a, 0x4e, 0xd9, 0x3e, 0x3a, 0x0f, 0xd9, + 0x03, 0x2f, 0x60, 0x22, 0x31, 0x64, 0xea, 0x44, 0x06, 0x5f, 0x53, 0x74, 0x1c, 0x49, 0xa0, 0x2a, + 0xa4, 0x7d, 0x8f, 0xb2, 0x20, 0x3f, 0x23, 0x0c, 0x3e, 0x37, 0xd6, 0xe0, 0xaa, 0x47, 0x99, 0xea, + 0xa5, 0xf1, 0x88, 0xe2, 0x08, 0x58, 0x02, 0xe9, 0x5f, 0xc0, 0x0b, 0x43, 0x2c, 0x97, 0x57, 0xd0, + 0x67, 0x90, 0xb1, 0x25, 0x53, 0x4d, 0xc4, 0xcb, 0x63, 0x15, 0x0e, 0xf1, 0x3f, 0x1e, 0xc4, 0xe1, + 0xc0, 0x0d, 0x51, 0xf5, 0xef, 0x35, 0x58, 0x1e, 0xb0, 0x54, 0xec, 0x12, 0x1e, 0x65, 0x22, 0x62, + 0xe9, 0xc4, 0x2e, 0xe1, 0x51, 0x86, 0x05, 0x07, 0x5d, 0x87, 0xac, 0x58, 0x45, 0x2c, 0xcf, 0x51, + 0x51, 0x2b, 0x87, 0x51, 0xab, 0x2a, 0xfa, 0x51, 0xa7, 0xf8, 0xe2, 0xe0, 0x7e, 0x56, 0x0a, 0xd9, + 0x38, 0x02, 0xe0, 0x55, 0x47, 0x28, 0xf5, 0xa8, 0x2a, 0x4c, 0x51, 0x75, 0x9b, 0x9c, 0x80, 0x25, + 0x5d, 0xff, 0x2e, 0x4e, 0x4a, 0xbe, 0x2b, 0x70, 0xfb, 0xf8, 0x8b, 0xf4, 0xcf, 0x72, 0xfe, 0x5e, + 0x58, 0x70, 0x90, 0x0f, 0x4b, 0x76, 0xdf, 0x72, 0x31, 0x71, 0xd3, 0x8d, 0x6e, 0x18, 0x79, 0x85, + 0xbc, 0xd4, 0xcf, 0xc1, 0x03, 0xe8, 0xfa, 0x2d, 0x18, 0x90, 0xe2, 0xed, 0xfe, 0x80, 0x31, 0x7f, + 0x48, 0xe1, 0x8c, 0xde, 0x66, 0x62, 0xed, 0x59, 0xe1, 0x53, 0xbd, 0x5e, 0xc5, 0x02, 0x45, 0xff, + 0x46, 0x83, 0x53, 0x43, 0x07, 0x67, 0xd4, 0xd8, 0xb4, 0x91, 0x8d, 0xad, 0xa2, 0x5e, 0x54, 0xc6, + 0xe0, 0xfc, 0x68, 0x4b, 0x7a, 0x91, 0xf9, 0x8b, 0x0f, 0x7b, 0x7f, 0xfd, 0xcf, 0x54, 0xf4, 0x22, + 0xa2, 0xab, 0xbd, 0x1b, 0xc5, 0x5b, 0x74, 0x1d, 0xae, 0x59, 0xf5, 0xd0, 0x93, 0x89, 0xf8, 0x45, + 0x3c, 0x3c, 0x20, 0x8d, 0x1a, 0xb0, 0xd8, 0x20, 0xfb, 0x66, 0xdb, 0x61, 0x4a, 0xb7, 0x8a, 0xda, + 0xe4, 0xeb, 0x26, 0xea, 0x76, 0x8a, 0x8b, 0x57, 0x7a, 0x30, 0x70, 0x1f, 0x26, 0xda, 0x80, 0x69, + 0xe6, 0x84, 0xed, 0xe6, 0x95, 0xb1, 0xd0, 0xf5, 0x9d, 0x9a, 0x91, 0x53, 0xee, 0x4f, 0xd7, 0x77, + 0x6a, 0x98, 0xdf, 0x46, 0xef, 0x43, 0x9a, 0xb6, 0x1d, 0xc2, 0x97, 0xa9, 0xe9, 0x89, 0xf6, 0x32, + 0xfe, 0xa6, 0x71, 0xf9, 0xf3, 0x53, 0x80, 0x25, 0x84, 0xfe, 0x25, 0x2c, 0xf4, 0x6c, 0x5c, 0xa8, + 0x05, 0xf3, 0x4e, 0xa2, 0x84, 0x55, 0x14, 0x2e, 0xfd, 0xaf, 0xba, 0x57, 0x0d, 0xe7, 0xa4, 0xd2, + 0x38, 0x9f, 0xe4, 0xe1, 0x1e, 0x78, 0xdd, 0x04, 0x88, 0x7d, 0xe5, 0x95, 0xc8, 0xcb, 0x47, 0x76, + 0x1b, 0x55, 0x89, 0xbc, 0xaa, 0x02, 0x2c, 0xe9, 0x7c, 0x7a, 0x05, 0xc4, 0xa2, 0x84, 0x55, 0xe2, + 0x7e, 0x19, 0x4d, 0xaf, 0x5a, 0xc4, 0xc1, 0x09, 0x29, 0xfd, 0x77, 0x0d, 0x16, 0x2a, 0xd2, 0xe4, + 0xaa, 0xe7, 0xd8, 0xd6, 0xbd, 0x63, 0x58, 0xb4, 0x6e, 0xf4, 0x2c, 0x5a, 0x4f, 0x68, 0xd3, 0x3d, + 0x86, 0x8d, 0xdc, 0xb4, 0x7e, 
0xd2, 0xe0, 0xf9, 0x1e, 0xc9, 0xcd, 0xb8, 0x19, 0x45, 0x23, 0x41, + 0x1b, 0x37, 0x12, 0x7a, 0x10, 0x44, 0x69, 0x0d, 0x1d, 0x09, 0x68, 0x0b, 0x52, 0xcc, 0x53, 0x39, + 0x3a, 0x31, 0x1c, 0x21, 0x34, 0x9e, 0x6d, 0x75, 0x0f, 0xa7, 0x98, 0xa7, 0xff, 0xa2, 0x41, 0xbe, + 0x47, 0x2a, 0xd9, 0x44, 0x9f, 0xbd, 0xdd, 0x37, 0x60, 0x66, 0x9f, 0x7a, 0xad, 0xa7, 0xb1, 0x3c, + 0x0a, 0xfa, 0x55, 0xea, 0xb5, 0xb0, 0x80, 0xd1, 0xef, 0x6b, 0xb0, 0xdc, 0x23, 0x79, 0x0c, 0x0b, + 0xc9, 0x4e, 0xef, 0x42, 0x72, 0x76, 0x42, 0x1f, 0x46, 0xac, 0x25, 0xf7, 0x53, 0x7d, 0x1e, 0x70, + 0x5f, 0xd1, 0x3e, 0xe4, 0x7c, 0xaf, 0x51, 0x23, 0x0e, 0xb1, 0x98, 0x37, 0xac, 0xc0, 0x9f, 0xe4, + 0x84, 0xb9, 0x47, 0x9c, 0xf0, 0xaa, 0x71, 0xa2, 0xdb, 0x29, 0xe6, 0xaa, 0x31, 0x16, 0x4e, 0x02, + 0xa3, 0xbb, 0xb0, 0x1c, 0xed, 0xa2, 0x91, 0xb6, 0xd4, 0xd3, 0x6b, 0x3b, 0xd5, 0xed, 0x14, 0x97, + 0x2b, 0xfd, 0x88, 0x78, 0x50, 0x09, 0xba, 0x06, 0x19, 0xdb, 0x17, 0x9f, 0xdd, 0xea, 0x8b, 0xed, + 0x49, 0x8b, 0x9d, 0xfc, 0x3e, 0x97, 0x1f, 0x7f, 0xea, 0x80, 0xc3, 0xeb, 0xfa, 0x5f, 0xfd, 0x39, + 0xc0, 0x13, 0x0e, 0x6d, 0x25, 0xb6, 0x0f, 0x39, 0xf3, 0xce, 0x3d, 0xdd, 0xe6, 0xd1, 0x3b, 0x16, + 0x47, 0x37, 0xa1, 0x36, 0xb3, 0x9d, 0x92, 0xfc, 0x31, 0xa6, 0xb4, 0xed, 0xb2, 0x5d, 0x5a, 0x63, + 0xd4, 0x76, 0x9b, 0x72, 0x44, 0x27, 0xd6, 0xa2, 0xd3, 0x90, 0x51, 0x53, 0x53, 0x38, 0x9e, 0x96, + 0x5e, 0x6d, 0x4a, 0x12, 0x0e, 0x79, 0xfa, 0x51, 0x7f, 0x5e, 0x88, 0x19, 0x7a, 0xfb, 0x99, 0xe5, + 0xc5, 0x73, 0x2a, 0x1b, 0x47, 0xe7, 0xc6, 0x27, 0xf1, 0x62, 0x29, 0x33, 0x7d, 0x7d, 0xc2, 0x4c, + 0x4f, 0x4e, 0xb4, 0x91, 0x6b, 0x25, 0xfa, 0x10, 0x66, 0x89, 0x44, 0x97, 0x23, 0xf2, 0xe2, 0x84, + 0xe8, 0x71, 0x5b, 0x8d, 0x7f, 0x79, 0x50, 0x34, 0x05, 0x88, 0xde, 0xe1, 0x51, 0xe2, 0xb2, 0xfc, + 0x83, 0x5f, 0xee, 0xe1, 0x73, 0xc6, 0x4b, 0xd2, 0xd9, 0x88, 0x7c, 0xc4, 0x3f, 0x70, 0xa2, 0x23, + 0x4e, 0xde, 0xd0, 0x3f, 0x05, 0x34, 0xb8, 0xe4, 0x4c, 0xb0, 0x42, 0x9d, 0x81, 0x59, 0xb7, 0xdd, + 0xda, 0x23, 0xb2, 0x86, 0xd2, 0xb1, 0x81, 0x15, 0x41, 0xc5, 0x8a, 0x6b, 0xbc, 0xfd, 0xe0, 0x71, + 0x61, 0xea, 0xe1, 0xe3, 0xc2, 0xd4, 0xa3, 0xc7, 0x85, 0xa9, 0xaf, 0xba, 0x05, 0xed, 0x41, 0xb7, + 0xa0, 0x3d, 0xec, 0x16, 0xb4, 0x47, 0xdd, 0x82, 0xf6, 0x4f, 0xb7, 0xa0, 0x7d, 0xfb, 0x6f, 0x61, + 0xea, 0xa3, 0xfc, 0xa8, 0x5f, 0x4b, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x24, 0x03, 0xec, 0x04, + 0x48, 0x15, 0x00, 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -1302,6 +1396,128 @@ func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IngressLoadBalancerIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressLoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressLoadBalancerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressLoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IngressPortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressPortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressPortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2131,6 +2347,56 @@ func (m *IngressList) Size() (n int) { return n } +func (m *IngressLoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressLoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressPortStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *IngressRule) Size() (n int) { if m == nil { return 0 @@ -2516,6 +2782,50 @@ func (this *IngressList) String() string { }, "") return s } +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + 
repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} func (this *IngressRule) String() string { if this == nil { return "nil" @@ -2576,7 +2886,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4025,6 +4335,372 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } return nil } +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + 
} + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index e98ab179e..c72fdc8f3 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -28,19 +28,19 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/networking/v1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. 
PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -56,7 +56,7 @@ message HTTPIngressPath { // Implementations are required to support all path types. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -67,23 +67,24 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic repeated HTTPIngressPath paths = 1; } -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. message IPBlock { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // cidr is a string representing the IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" optional string cidr = 1; - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" - // Except values will be rejected if they are outside the CIDR range + // except is a slice of CIDRs that should not be included within an IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + // Except values will be rejected if they are outside the cidr range // +optional + // +listType=atomic repeated string except = 2; } @@ -95,14 +96,14 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -110,17 +111,17 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional optional IngressServiceBackend service = 4; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". 
// +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -132,9 +133,9 @@ message IngressClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -144,33 +145,33 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -179,15 +180,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. 
// +optional @@ -199,17 +200,65 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +message IngressLoadBalancerIngress { + // ip is set for load-balancer ingress points that are IP based. + // +optional + optional string ip = 1; + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + optional string hostname = 2; + + // ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + repeated IngressPortStatus ports = 4; +} + +// IngressLoadBalancerStatus represents the status of a load-balancer. +message IngressLoadBalancerStatus { + // ingress is a list containing ingress points for the load-balancer. + // +optional + // +listType=atomic + repeated IngressLoadBalancerIngress ingress = 1; +} + +// IngressPortStatus represents the error condition of a service port +message IngressPortStatus { + // port is the port number of the ingress port. + optional int32 port = 1; + + // protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -222,14 +271,14 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). 
// Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional optional string host = 1; @@ -254,48 +303,48 @@ message IngressRuleValue { // IngressServiceBackend references a Kubernetes Service as a Backend. message IngressServiceBackend { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. optional string name = 1; - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. optional ServiceBackendPort port = 2; } // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. + // ingressClassName is the name of an IngressClass cluster resource. Ingress + // controller implementations use this field to know whether they should be + // serving this Ingress resource, by a transitive connection + // (controller -> IngressClass -> Ingress resource). Although the + // `kubernetes.io/ingress.class` annotation (simple constant name) was never + // formally defined, it was widely supported by Ingress controllers to create + // a direct binding between Ingress controller and Ingress resources. Newly + // created Ingress resources should prefer using the field. However, even + // though the annotation is officially deprecated, for backwards compatibility + // reasons, ingress controllers should still honor that annotation if present. // +optional optional string ingressClassName = 4; - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional optional IngressBackend defaultBackend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. 
If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional repeated IngressRule rules = 3; @@ -303,14 +352,14 @@ message IngressSpec { // IngressStatus describe the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; + optional IngressLoadBalancerStatus loadBalancer = 1; } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. @@ -318,11 +367,11 @@ message IngressTLS { // +optional repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional optional string secretName = 2; } @@ -332,9 +381,9 @@ message NetworkPolicy { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional optional NetworkPolicySpec spec = 2; } @@ -343,40 +392,44 @@ message NetworkPolicy { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; - // List of destinations for outgoing traffic of pods selected for this rule. 
+ // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic repeated NetworkPolicyPeer to = 2; } // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic repeated NetworkPolicyPort ports = 1; - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional + // +listType=atomic repeated NetworkPolicyPeer from = 2; } @@ -385,34 +438,34 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated NetworkPolicy items = 2; } // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed message NetworkPolicyPeer { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - // Selects Namespaces using cluster-scoped labels. 
This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional optional IPBlock ipBlock = 3; @@ -420,80 +473,82 @@ message NetworkPolicyPeer { // NetworkPolicyPort describes a port to allow traffic on message NetworkPolicyPort { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional optional string protocol = 1; - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. - // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional optional int32 endPort = 3; } // NetworkPolicySpec provides the specification of a NetworkPolicy message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // podSelector selects the pods to which this NetworkPolicy object applies. 
+ // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy does not allow any traffic (and serves // solely to ensure that the pods it selects are isolated by default) // +optional + // +listType=atomic repeated NetworkPolicyIngressRule ingress = 2; - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated NetworkPolicyEgressRule egress = 3; - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional + // +listType=atomic repeated string policyTypes = 4; } // ServiceBackendPort is the service port being referenced. +// +structType=atomic message ServiceBackendPort { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional optional string name = 1; - // Number is the numerical port number (e.g. 80) on the Service. 
+ // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional optional int32 number = 2; diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index c49110e5c..d75e27558 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -24,18 +24,25 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.7 // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired behavior for this NetworkPolicy. + // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is tombstoned to show why 3 is a reserved protobuf tag. + // This commented field should remain, so in the future if we decide to reimplement + // NetworkPolicyStatus a different protobuf name and tag SHOULD be used! + // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // PolicyType string describes the NetworkPolicy type @@ -52,65 +59,70 @@ const ( // NetworkPolicySpec provides the specification of a NetworkPolicy type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. + // podSelector selects the pods to which this NetworkPolicy object applies. + // The array of ingress rules is applied to any pods selected by this field. + // Multiple network policies can select the same set of pods. In this case, + // the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod + // ingress is a list of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod // (and cluster policy otherwise allows the traffic), OR if the traffic source is // the pod's local node, OR if the traffic matches at least one ingress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy does not allow any traffic (and serves // solely to ensure that the pods it selects are isolated by default) // +optional + // +listType=atomic Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` - // List of egress rules to be applied to the selected pods. 
Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic + // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy // otherwise allows the traffic), OR if the traffic matches at least one egress rule // across all of the NetworkPolicy objects whose podSelector matches the pod. If // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves // solely to ensure that the pods it selects are isolated by default). // This field is beta-level in 1.8 // +optional + // +listType=atomic Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` - // List of rule types that the NetworkPolicy relates to. + // policyTypes is a list of rule types that the NetworkPolicy relates to. // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If this field is not specified, it will default based on the existence of ingress or egress rules; + // policies that contain an egress section are assumed to affect egress, and all policies + // (whether or not they contain an ingress section) are assumed to affect ingress. // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. // Likewise, if you want to write a policy that specifies that no egress is allowed, // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). + // an egress section and would otherwise default to just [ "Ingress" ]). // This field is beta-level in 1.8 // +optional + // +listType=atomic PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } // NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is + // ports is a list of ports which should be made accessible on the pods selected for + // this rule. Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of sources which should be able to access the pods selected for this rule. + // from is a list of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by // source). 
If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional + // +listType=atomic From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } @@ -118,104 +130,109 @@ type NetworkPolicyIngressRule struct { // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. + // ports is a list of destination ports for outgoing traffic. // Each item in this list is combined using a logical OR. If this field is // empty or missing, this rule matches all ports (traffic not restricted by port). // If this field is present and contains at least one item, then this rule allows // traffic only if the traffic matches at least one port in the list. // +optional + // +listType=atomic Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - // List of destinations for outgoing traffic of pods selected for this rule. + // to is a list of destinations for outgoing traffic of pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all destinations (traffic not restricted by // destination). If this field is present and contains at least one item, this rule // allows traffic only if the traffic matches at least one item in the to list. // +optional + // +listType=atomic To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` } // NetworkPolicyPort describes a port to allow traffic on type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. + // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. // +optional Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` - // The port on the given protocol. This can either be a numerical or named + // port represents the port on the given protocol. This can either be a numerical or named // port on a pod. If this field is not provided, this matches all port names and // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` - // If set, indicates that the range of ports from port to endPort, inclusive, + // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field // is not defined or if the port field is defined as a named (string) port. // The endPort must be equal or greater than port. - // This feature is in Beta state and is enabled by default. - // It can be disabled using the Feature Gate "NetworkPolicyEndPort". // +optional EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"` } -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed // to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs // that should not be included within this rule. 
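For orientation, the egress-related types updated above (NetworkPolicyEgressRule, NetworkPolicyPort with its endPort range, and the IPBlock peer) compose roughly as in the minimal Go sketch below. This is illustrative only and not part of the vendored upstream file; the policy name, namespace, labels, ports, and CIDRs are assumed values, not taken from this diff.

package example

import (
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// allowEgressRange is a hypothetical NetworkPolicy that restricts the selected pods'
// outgoing TCP traffic to ports 8000-8999 on one CIDR, with a single address excluded.
func allowEgressRange() *networkingv1.NetworkPolicy {
	tcp := corev1.ProtocolTCP
	port := intstr.FromInt32(8000)
	endPort := int32(8999)
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-egress-range", Namespace: "default"},
		Spec: networkingv1.NetworkPolicySpec{
			// An empty podSelector would match every pod in the namespace; this selects app=web.
			PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
			Egress: []networkingv1.NetworkPolicyEgressRule{{
				Ports: []networkingv1.NetworkPolicyPort{{Protocol: &tcp, Port: &port, EndPort: &endPort}},
				To: []networkingv1.NetworkPolicyPeer{{
					IPBlock: &networkingv1.IPBlock{CIDR: "192.168.1.0/24", Except: []string{"192.168.1.8/32"}},
				}},
			}},
		},
	}
}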
type IPBlock struct { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // cidr is a string representing the IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" - // Except values will be rejected if they are outside the CIDR range + + // except is a slice of CIDRs that should not be included within an IPBlock + // Valid examples are "192.168.1.0/24" or "2001:db8::/64" + // Except values will be rejected if they are outside the cidr range // +optional + // +listType=atomic Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } // NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of // fields are allowed type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label + // podSelector is a label selector which selects pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. + // namespaceSelector selects namespaces using cluster-scoped labels. This field follows + // standard label selector semantics; if present but empty, it selects all namespaces. // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // If podSelector is also set, then the NetworkPolicyPeer as a whole selects + // the pods matching podSelector in the namespaces selected by namespaceSelector. + // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - // IPBlock defines policy on a particular IPBlock. If this field is set then + // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. // +optional IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services @@ -223,104 +240,156 @@ type NetworkPolicyList struct { // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. + // ingressClassName is the name of an IngressClass cluster resource. Ingress + // controller implementations use this field to know whether they should be + // serving this Ingress resource, by a transitive connection + // (controller -> IngressClass -> Ingress resource). Although the + // `kubernetes.io/ingress.class` annotation (simple constant name) was never + // formally defined, it was widely supported by Ingress controllers to create + // a direct binding between Ingress controller and Ingress resources. 
Newly + // created Ingress resources should prefer using the field. However, even + // though the annotation is officially deprecated, for backwards compatibility + // reasons, ingress controllers should still honor that annotation if present. // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // DefaultBackend is the backend that should handle requests that don't + // defaultBackend is the backend that should handle requests that don't // match any rule. If Rules are not specified, DefaultBackend must be specified. // If DefaultBackend is not set, the handling of requests that do not match any // of the rules will be up to the Ingress controller. // +optional DefaultBackend *IngressBackend `json:"defaultBackend,omitempty" protobuf:"bytes,1,opt,name=defaultBackend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +listType=atomic // +optional TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. + // rules is a list of host rules used to configure the Ingress. If unspecified, + // or no rule matches, all traffic is sent to the default backend. // +listType=atomic // +optional Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` } -// IngressTLS describes the transport layer security associated with an Ingress. +// IngressTLS describes the transport layer security associated with an ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +listType=atomic // +optional Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. + // and value of the "Host" header is used for routing. // +optional SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` } // IngressStatus describe the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. 
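As a rough illustration of how IngressSpec, IngressRule, and the path/backend types defined further down in this file fit together, here is a minimal hedged sketch; the class name, host, TLS secret, service name, and port number are assumptions for the example, not values from this diff.

package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleIngress is a hypothetical Ingress wiring together ingressClassName, a TLS entry,
// and one host rule whose path forwards to a named service port.
func exampleIngress() *networkingv1.Ingress {
	className := "nginx" // assumed IngressClass name
	pathType := networkingv1.PathTypePrefix
	return &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: networkingv1.IngressSpec{
			IngressClassName: &className,
			TLS:              []networkingv1.IngressTLS{{Hosts: []string{"foo.bar.com"}, SecretName: "web-tls"}},
			Rules: []networkingv1.IngressRule{{
				Host: "foo.bar.com",
				IngressRuleValue: networkingv1.IngressRuleValue{
					HTTP: &networkingv1.HTTPIngressRuleValue{
						Paths: []networkingv1.HTTPIngressPath{{
							Path:     "/",
							PathType: &pathType,
							Backend: networkingv1.IngressBackend{
								Service: &networkingv1.IngressServiceBackend{
									Name: "web",
									Port: networkingv1.ServiceBackendPort{Number: 80},
								},
							},
						}},
					},
				},
			}},
		},
	}
}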
+ // +optional + LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressLoadBalancerStatus represents the status of a load-balancer. +type IngressLoadBalancerStatus struct { + // ingress is a list containing ingress points for the load-balancer. // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + // +listType=atomic + Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +type IngressLoadBalancerIngress struct { + // ip is set for load-balancer ingress points that are IP based. + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` +} + +// IngressPortStatus represents the error condition of a service port +type IngressPortStatus struct { + // port is the port number of the ingress port. + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + + // protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -333,14 +402,14 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). 
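The IngressLoadBalancerStatus, IngressLoadBalancerIngress, and IngressPortStatus types introduced above replace the earlier use of core/v1 LoadBalancerStatus in the Ingress status. A small sketch of reading them follows; the helper name is hypothetical and the snippet assumes the networking/v1 types shown in this diff.

package example

import networkingv1 "k8s.io/api/networking/v1"

// lbAddresses collects the externally visible addresses an ingress controller has
// reported in status.loadBalancer; entries may be IP-based or DNS-based.
func lbAddresses(ing *networkingv1.Ingress) []string {
	var addrs []string
	for _, lb := range ing.Status.LoadBalancer.Ingress {
		if lb.IP != "" {
			addrs = append(addrs, lb.IP)
		}
		if lb.Hostname != "" {
			addrs = append(addrs, lb.Hostname)
		}
	}
	return addrs
}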
// The wildcard character '*' must appear by itself as the first DNS label and // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header + // 1. If host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If host is a wildcard, then the request matches this rule if the http host header // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` @@ -350,7 +419,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -368,7 +437,7 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -407,14 +476,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -430,19 +499,19 @@ type HTTPIngressPath struct { // Implementations are required to support all path types. PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Service references a Service as a Backend. + // service references a service as a backend. // This is a mutually exclusive setting with "Resource". // +optional Service *IngressServiceBackend `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. 
If resource is specified, a service.Name and // service.Port must not be specified. // This is a mutually exclusive setting with "Service". @@ -452,23 +521,24 @@ type IngressBackend struct { // IngressServiceBackend references a Kubernetes Service as a Backend. type IngressServiceBackend struct { - // Name is the referenced service. The service must exist in + // name is the referenced service. The service must exist in // the same namespace as the Ingress object. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Port of the referenced service. A port name or port number + // port of the referenced service. A port name or port number // is required for a IngressServiceBackend. Port ServiceBackendPort `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } // ServiceBackendPort is the service port being referenced. +// +structType=atomic type ServiceBackendPort struct { - // Name is the name of the port on the Service. + // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number is the numerical port number (e.g. 80) on the Service. + // number is the numerical port number (e.g. 80) on the Service. // This is a mutually exclusive setting with "Name". // +optional Number int32 `json:"number,omitempty" protobuf:"bytes,2,opt,name=number"` @@ -477,6 +547,7 @@ type ServiceBackendPort struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClass represents the class of the Ingress, referenced by the Ingress // Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be @@ -485,12 +556,13 @@ type ServiceBackendPort struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -498,15 +570,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. 
This is optional if the controller does // not require extra parameters. // +optional @@ -517,7 +589,7 @@ const ( // IngressClassParametersReferenceScopeNamespace indicates that the // referenced Parameters resource is namespace-scoped. IngressClassParametersReferenceScopeNamespace = "Namespace" - // IngressClassParametersReferenceScopeNamespace indicates that the + // IngressClassParametersReferenceScopeCluster indicates that the // referenced Parameters resource is cluster-scoped. IngressClassParametersReferenceScopeCluster = "Cluster" ) @@ -525,20 +597,24 @@ const ( // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". // +optional Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -546,14 +622,16 @@ type IngressClassParametersReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClassList is a collection of IngressClasses. type IngressClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index 38bd9c502..ff080540d 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. 
Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { @@ -48,9 +48,9 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { } var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "cidr is a string representing the IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "except": "except is a slice of CIDRs that should not be included within an IPBlock Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\" Except values will be rejected if they are outside the cidr range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -60,8 +60,8 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -70,8 +70,8 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "service": "Service references a Service as a Backend. This is a mutually exclusive setting with \"Resource\".", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", + "service": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\".", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. 
The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -91,7 +91,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -100,11 +100,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". 
This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -124,16 +124,47 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { return map_IngressList } +var map_IngressLoadBalancerIngress = map[string]string{ + "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", +} + +func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerIngress +} + +var map_IngressLoadBalancerStatus = map[string]string{ + "": "IngressLoadBalancerStatus represents the status of a load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", +} + +func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerStatus +} + +var map_IngressPortStatus = map[string]string{ + "": "IngressPortStatus represents the error condition of a service port", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (IngressPortStatus) SwaggerDoc() map[string]string { + return map_IngressPortStatus +} + var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. 
The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -150,8 +181,8 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressServiceBackend = map[string]string{ "": "IngressServiceBackend references a Kubernetes Service as a Backend.", - "name": "Name is the referenced service. The service must exist in the same namespace as the Ingress object.", - "port": "Port of the referenced service. A port name or port number is required for a IngressServiceBackend.", + "name": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "port": "port of the referenced service. A port name or port number is required for a IngressServiceBackend.", } func (IngressServiceBackend) SwaggerDoc() map[string]string { @@ -160,10 +191,10 @@ func (IngressServiceBackend) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. 
The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "defaultBackend": "DefaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -> IngressClass -> Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "defaultBackend": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -172,7 +203,7 @@ func (IngressSpec) SwaggerDoc() map[string]string { var map_IngressStatus = map[string]string{ "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -180,9 +211,9 @@ func (IngressStatus) SwaggerDoc() map[string]string { } var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "": "IngressTLS describes the transport layer security associated with an ingress.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { @@ -192,7 +223,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", + "spec": "spec represents the specification of the desired behavior for this NetworkPolicy.", } func (NetworkPolicy) SwaggerDoc() map[string]string { @@ -201,8 +232,8 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { var map_NetworkPolicyEgressRule = map[string]string{ "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. 
If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "ports": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { @@ -211,8 +242,8 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "ports": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -222,7 +253,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (NetworkPolicyList) SwaggerDoc() map[string]string { @@ -231,9 +262,9 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { var map_NetworkPolicyPeer = map[string]string{ "": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", + "podSelector": "podSelector is a label selector which selects pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace.", + "namespaceSelector": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector.", + "ipBlock": "ipBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", } func (NetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -242,9 +273,9 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { var map_NetworkPolicyPort = map[string]string{ "": "NetworkPolicyPort describes a port to allow traffic on", - "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", - "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".", + "protocol": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. 
If not specified, this field defaults to TCP.", + "port": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", + "endPort": "endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port.", } func (NetworkPolicyPort) SwaggerDoc() map[string]string { @@ -253,10 +284,10 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { var map_NetworkPolicySpec = map[string]string{ "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "podSelector": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. 
An empty podSelector matches all pods in this namespace.", + "ingress": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -265,8 +296,8 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { var map_ServiceBackendPort = map[string]string{ "": "ServiceBackendPort is the service port being referenced.", - "name": "Name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", - "number": "Number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "number": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", } func (ServiceBackendPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 82677ca8b..540873833 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -293,6 +293,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]IngressPortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress. +func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress { + if in == nil { + return nil + } + out := new(IngressLoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressLoadBalancerIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus. +func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus { + if in == nil { + return nil + } + out := new(IngressLoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus. +func (in *IngressPortStatus) DeepCopy() *IngressPortStatus { + if in == nil { + return nil + } + out := new(IngressPortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressRule) DeepCopyInto(out *IngressRule) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..21e8c671a --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *Ingress) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClass) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/doc.go b/vendor/k8s.io/api/networking/v1alpha1/doc.go new file mode 100644 index 000000000..3827b0418 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=networking.k8s.io + +package v1alpha1 // import "k8s.io/api/networking/v1alpha1" diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go new file mode 100644 index 000000000..0d4203483 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -0,0 +1,1929 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/networking/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{0} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{1} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{2} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{3} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() {} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{4} +} +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) +} +func (m *ServiceCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo + +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{5} +} +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) +} +func (m *ServiceCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo + +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{6} +} +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) +} +func (m *ServiceCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func 
(*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_c1cb39e7b48ce50d, []int{7} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus") +} + +func init() { + proto.RegisterFile("k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1cb39e7b48ce50d) +} + +var fileDescriptor_c1cb39e7b48ce50d = []byte{ + // 634 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4a, + 0x18, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0xb7, 0xb7, 0xb7, 0x5e, 0x45, 0x5d, 0x38, 0x91, 0xef, 0xa6, + 0x08, 0x3a, 0x26, 0x11, 0x42, 0x6c, 0x71, 0x2b, 0xa1, 0x4a, 0xd0, 0x96, 0xe9, 0x0a, 0xd4, 0x05, + 0xd3, 0xc9, 0x57, 0x67, 0x08, 0xfe, 0xd1, 0xcc, 0x24, 0xc0, 0x8e, 0x47, 0xe0, 0x05, 0x78, 0x0e, + 0x56, 0x20, 0xb1, 0xeb, 0xb2, 0xcb, 0xae, 0x2a, 0x6a, 0x5e, 0x04, 0xcd, 0xd8, 0xb1, 0x93, 0x46, + 0xfd, 0xdb, 0x74, 0xe7, 0xef, 0xcc, 0x39, 0x67, 0xbe, 0xf3, 0xcd, 0x8c, 0x8c, 0xf0, 0xf0, 0x99, + 0xc4, 0x3c, 0xf1, 0x69, 0xca, 0xfd, 0x18, 0xd4, 0xc7, 0x44, 0x0c, 0x79, 0x1c, 0xfa, 0xe3, 0x2e, + 0xfd, 0x90, 0x0e, 0x68, 0xd7, 0x0f, 0x21, 0x06, 0x41, 0x15, 0xf4, 0x71, 0x2a, 0x12, 0x95, 0x38, + 0x6e, 0xce, 0xc7, 0x34, 0xe5, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0x33, 0xe4, 0x6a, 0x30, 0x3a, + 0xc2, 0x2c, 0x89, 0xfc, 0x30, 0x09, 0x13, 0xdf, 0xc8, 0x8e, 0x46, 0xc7, 0xa6, 0x32, 0x85, 0xf9, + 0xca, 0xed, 0xd6, 0x9f, 0x54, 0xdb, 0x47, 0x94, 0x0d, 0x78, 0x0c, 0xe2, 0xb3, 0x9f, 0x0e, 0x43, + 0x0d, 0x48, 0x3f, 0x02, 0x45, 0xfd, 0xf1, 0x5c, 0x13, 0xeb, 0xfe, 0x55, 0x2a, 0x31, 0x8a, 0x15, + 0x8f, 0x60, 0x4e, 0xf0, 0xf4, 0x26, 0x81, 0x64, 0x03, 0x88, 0xe8, 0x65, 0x9d, 0xf7, 0xd3, 0x42, + 0xf6, 0xce, 0xfe, 0xf3, 0x7e, 0x5f, 0x80, 0x94, 0xce, 0x3b, 0xb4, 0xac, 0x3b, 0xea, 0x53, 0x45, + 0x5b, 0x56, 0xc7, 0xda, 0x68, 0xf6, 0x1e, 0xe3, 0x6a, 0x1c, 0xa5, 0x31, 0x4e, 0x87, 0xa1, 0x06, + 0x24, 0xd6, 0x6c, 0x3c, 0xee, 0xe2, 0xbd, 0xa3, 0xf7, 0xc0, 0xd4, 0x2b, 0x50, 0x34, 0x70, 0x4e, + 0xce, 0xdb, 0xb5, 0xec, 0xbc, 0x8d, 0x2a, 0x8c, 0x94, 0xae, 0xce, 0x1e, 0xaa, 0xcb, 0x14, 0x58, + 0x6b, 0xc1, 0xb8, 0x6f, 0xe2, 0xeb, 0x87, 0x8d, 0xcb, 0xd6, 0x0e, 0x52, 0x60, 0xc1, 0x3f, 0x85, + 
0x75, 0x5d, 0x57, 0xc4, 0x18, 0x79, 0x3f, 0x2c, 0xb4, 0x52, 0xb2, 0x5e, 0x72, 0xa9, 0x9c, 0xc3, + 0xb9, 0x10, 0xf8, 0x76, 0x21, 0xb4, 0xda, 0x44, 0xf8, 0xaf, 0xd8, 0x67, 0x79, 0x82, 0x4c, 0x05, + 0xd8, 0x45, 0x0d, 0xae, 0x20, 0x92, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xf7, 0xe0, 0xd6, 0x09, + 0x82, 0x95, 0xc2, 0xb5, 0xb1, 0xa3, 0xf5, 0x24, 0xb7, 0xf1, 0xa2, 0xa9, 0xf6, 0x75, 0x2c, 0xe7, + 0x10, 0xd9, 0x29, 0x15, 0x10, 0x2b, 0x02, 0xc7, 0x45, 0xff, 0xfe, 0x4d, 0x9b, 0xec, 0x4f, 0x04, + 0x20, 0x20, 0x66, 0x10, 0xac, 0x64, 0xe7, 0x6d, 0xbb, 0x04, 0x49, 0x65, 0xe8, 0x7d, 0xb7, 0xd0, + 0xea, 0x25, 0xb6, 0xf3, 0x3f, 0x6a, 0x84, 0x22, 0x19, 0xa5, 0x66, 0x37, 0xbb, 0xea, 0xf3, 0x85, + 0x06, 0x49, 0xbe, 0xe6, 0x3c, 0x42, 0xcb, 0x02, 0x64, 0x32, 0x12, 0x0c, 0xcc, 0xe1, 0xd9, 0xd5, + 0x94, 0x48, 0x81, 0x93, 0x92, 0xe1, 0xf8, 0xc8, 0x8e, 0x69, 0x04, 0x32, 0xa5, 0x0c, 0x5a, 0x8b, + 0x86, 0xbe, 0x56, 0xd0, 0xed, 0xdd, 0xc9, 0x02, 0xa9, 0x38, 0x4e, 0x07, 0xd5, 0x75, 0xd1, 0xaa, + 0x1b, 0x6e, 0x79, 0xd0, 0x9a, 0x4b, 0xcc, 0x8a, 0xf7, 0x6d, 0x01, 0x35, 0x0f, 0x40, 0x8c, 0x39, + 0x83, 0xad, 0x9d, 0x6d, 0x72, 0x0f, 0x77, 0xf5, 0xf5, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, + 0xbb, 0xea, 0xb6, 0x3a, 0x6f, 0xd0, 0x92, 0x54, 0x54, 0x8d, 0xa4, 0x19, 0x4a, 0xb3, 0xd7, 0xbd, + 0x8b, 0xa9, 0x11, 0x06, 0xff, 0x16, 0xb6, 0x4b, 0x79, 0x4d, 0x0a, 0x43, 0xef, 0x97, 0x85, 0x56, + 0xa7, 0xd8, 0xf7, 0xf0, 0x14, 0xf6, 0x67, 0x9f, 0xc2, 0xc3, 0x3b, 0x64, 0xb9, 0xe2, 0x31, 0xf4, + 0x66, 0x22, 0x98, 0xe7, 0xd0, 0x46, 0x0d, 0xc6, 0xfb, 0x42, 0xb6, 0xac, 0xce, 0xe2, 0x86, 0x1d, + 0xd8, 0x5a, 0xa3, 0x17, 0x25, 0xc9, 0x71, 0xef, 0x13, 0x5a, 0x9b, 0x1b, 0x92, 0xc3, 0x10, 0x62, + 0x49, 0xdc, 0xe7, 0x8a, 0x27, 0x71, 0x2e, 0x9d, 0x3d, 0xc0, 0x6b, 0xa2, 0x6f, 0x4d, 0x74, 0xd5, + 0xed, 0x28, 0x21, 0x49, 0xa6, 0x6c, 0x83, 0xed, 0x93, 0x0b, 0xb7, 0x76, 0x7a, 0xe1, 0xd6, 0xce, + 0x2e, 0xdc, 0xda, 0x97, 0xcc, 0xb5, 0x4e, 0x32, 0xd7, 0x3a, 0xcd, 0x5c, 0xeb, 0x2c, 0x73, 0xad, + 0xdf, 0x99, 0x6b, 0x7d, 0xfd, 0xe3, 0xd6, 0xde, 0xba, 0xd7, 0xff, 0x7f, 0xfe, 0x06, 0x00, 0x00, + 0xff, 0xff, 0xb1, 0xd0, 0x33, 0x02, 0xa0, 0x06, 0x00, 0x00, +} + +func (m *IPAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { 
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ParentRef != nil { + { + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { + size 
:= m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} 
+ +func (m *ServiceCIDR) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceCIDRList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := 
"[]ServiceCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *IPAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParentReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ServiceCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto new file mode 100644 index 000000000..80ec6af73 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -0,0 +1,142 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.networking.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/networking/v1alpha1"; + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; +} + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +message ServiceCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRSpec spec = 2; + + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRStatus status = 3; +} + +// ServiceCIDRList contains a list of ServiceCIDR objects. +message ServiceCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ServiceCIDRs. + repeated ServiceCIDR items = 2; +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +message ServiceCIDRSpec { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. + // This field is immutable. + // +optional + // +listType=atomic + repeated string cidrs = 1; +} + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +message ServiceCIDRStatus { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. 
+ // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; +} + diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go new file mode 100644 index 000000000..c8f5856b5 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package. +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register objects in this package. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder holds functions that add things to a scheme. + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + + // AddToScheme adds the types of this group into the given scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &IPAddress{}, + &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go new file mode 100644 index 000000000..0e454f026 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -0,0 +1,154 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. + // This field is immutable. + // +optional + // +listType=atomic + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. + ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.27 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..4c8eb57a7 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,110 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
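As an illustrative aside (not part of the vendored files above): the types.go hunk defines the v1alpha1 IPAddress, ParentReference and ServiceCIDR structs together with register.go's AddToScheme and the well-known labels. A minimal sketch of how client code might consume them is below; the scheme setup, object names, label value and CIDR values are assumptions for illustration only, not anything this diff or the upstream API mandates.

// Illustrative sketch only, not part of the vendored diff. It registers the new
// networking v1alpha1 types into a scheme and builds the two alpha objects,
// following the field comments above (canonical-IP name, required parentRef,
// at most two CIDRs, one per IP family). All concrete values are made up.
package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the alpha types, as register.go's AddToScheme permits.
	scheme := runtime.NewScheme()
	if err := networkingv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// ServiceCIDR: spec.cidrs holds at most two CIDRs, one per IP family, and is immutable.
	cidr := networkingv1alpha1.ServiceCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example-cidr"},
		Spec: networkingv1alpha1.ServiceCIDRSpec{
			CIDRs: []string{"10.96.0.0/16", "2001:db8::/64"},
		},
	}

	// IPAddress: the object name is the IP in canonical form and parentRef is required.
	ip := networkingv1alpha1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "10.96.0.10",
			Labels: map[string]string{networkingv1alpha1.LabelManagedBy: "example-controller"},
		},
		Spec: networkingv1alpha1.IPAddressSpec{
			ParentRef: &networkingv1alpha1.ParentReference{
				Group:     "", // core API group
				Resource:  "services",
				Namespace: "default",
				Name:      "kubernetes",
			},
		},
	}

	fmt.Println(cidr.Name, ip.Name)
}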
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go new file mode 100644 index 000000000..5f9c23f70 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. 
It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..5c8f697ba --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,229 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. 
+func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { + if in == nil { + return nil + } + out := new(ServiceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { + if in == nil { + return nil + } + out := new(ServiceCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { + if in == nil { + return nil + } + out := new(ServiceCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. 
+func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { + if in == nil { + return nil + } + out := new(ServiceCIDRStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..714e7b625 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,94 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 27 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 30 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 33 +} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index cda115147..a924725f2 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto +// source: k8s.io/api/networking/v1beta1/generated.proto package v1beta1 @@ -25,7 +25,9 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -47,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{0} + return fileDescriptor_9497719c79c89d2d, []int{0} } func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +77,7 @@ var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{1} + return fileDescriptor_9497719c79c89d2d, []int{1} } func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -100,10 +102,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{2} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{3} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{4} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{2} + return fileDescriptor_9497719c79c89d2d, []int{5} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +217,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{3} + return fileDescriptor_9497719c79c89d2d, []int{6} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +245,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressClass) Reset() { *m = IngressClass{} } func (*IngressClass) ProtoMessage() {} func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{4} + return fileDescriptor_9497719c79c89d2d, []int{7} } func (m *IngressClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +273,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo func (m *IngressClassList) Reset() { *m = IngressClassList{} } func (*IngressClassList) ProtoMessage() {} func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{5} + return fileDescriptor_9497719c79c89d2d, []int{8} } func (m *IngressClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +301,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} } func (*IngressClassParametersReference) ProtoMessage() {} func (*IngressClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{6} + return fileDescriptor_9497719c79c89d2d, []int{9} } func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +329,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } func (*IngressClassSpec) ProtoMessage() {} func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{7} + return fileDescriptor_9497719c79c89d2d, []int{10} } func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +357,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{8} + return fileDescriptor_9497719c79c89d2d, []int{11} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -296,10 +382,94 @@ func (m *IngressList) XXX_DiscardUnknown() { var xxx_messageInfo_IngressList proto.InternalMessageInfo +func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } +func (*IngressLoadBalancerIngress) ProtoMessage() {} +func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { + return 
fileDescriptor_9497719c79c89d2d, []int{12} +} +func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src) +} +func (m *IngressLoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo + +func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } +func (*IngressLoadBalancerStatus) ProtoMessage() {} +func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{13} +} +func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src) +} +func (m *IngressLoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo + +func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } +func (*IngressPortStatus) ProtoMessage() {} +func (*IngressPortStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{14} +} +func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressPortStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressPortStatus.Merge(m, src) +} +func (m *IngressPortStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressPortStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressPortStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo + func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{9} + return fileDescriptor_9497719c79c89d2d, []int{15} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +497,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{10} + return fileDescriptor_9497719c79c89d2d, []int{16} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +525,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func 
(*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{11} + return fileDescriptor_9497719c79c89d2d, []int{17} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +553,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{12} + return fileDescriptor_9497719c79c89d2d, []int{18} } func (m *IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +581,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{13} + return fileDescriptor_9497719c79c89d2d, []int{19} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -436,9 +606,152 @@ func (m *IngressTLS) XXX_DiscardUnknown() { var xxx_messageInfo_IngressTLS proto.InternalMessageInfo +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{20} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() {} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{21} +} +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) +} +func (m *ServiceCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo + +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{22} +} +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) +} +func (m *ServiceCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + 
xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo + +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{23} +} +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) +} +func (m *ServiceCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func (*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{24} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1beta1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1beta1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1beta1.IPAddressSpec") proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass") @@ -446,87 +759,119 @@ func init() { proto.RegisterType((*IngressClassParametersReference)(nil), "k8s.io.api.networking.v1beta1.IngressClassParametersReference") proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1beta1.IngressClassSpec") proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") + proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.networking.v1beta1.IngressLoadBalancerIngress") + proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.networking.v1beta1.IngressLoadBalancerStatus") + proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.networking.v1beta1.IngressPortStatus") proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue") proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") 
proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1beta1.ParentReference") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRStatus") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) -} - -var fileDescriptor_5bea11de0ceb8f53 = []byte{ - // 1085 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x41, 0x6f, 0xe3, 0x44, - 0x14, 0xae, 0x93, 0x66, 0x9b, 0x4e, 0xd2, 0x6e, 0x35, 0xf4, 0x10, 0x55, 0x22, 0xa9, 0x7c, 0x40, - 0x85, 0xa5, 0x36, 0xcd, 0x2e, 0x88, 0x13, 0x02, 0xaf, 0x04, 0xad, 0x36, 0x6c, 0xc3, 0x24, 0x02, - 0x84, 0x38, 0xec, 0xc4, 0x79, 0xeb, 0x98, 0x38, 0xb6, 0x99, 0x19, 0x07, 0xed, 0x8d, 0x2b, 0x27, - 0xf8, 0x15, 0xfc, 0x04, 0xc4, 0x11, 0xc1, 0xa5, 0xc7, 0x3d, 0xee, 0x85, 0x8a, 0x86, 0x7f, 0xc1, - 0x09, 0xcd, 0x78, 0x62, 0x3b, 0x49, 0xcb, 0xa6, 0x1c, 0xf6, 0x94, 0xcc, 0x7b, 0xdf, 0x7b, 0x6f, - 0xde, 0x7b, 0xdf, 0xbc, 0x67, 0xf4, 0xf1, 0xf8, 0x7d, 0x6e, 0xf9, 0x91, 0x3d, 0x4e, 0x06, 0xc0, - 0x42, 0x10, 0xc0, 0xed, 0x29, 0x84, 0xc3, 0x88, 0xd9, 0x5a, 0x41, 0x63, 0xdf, 0x0e, 0x41, 0x7c, - 0x17, 0xb1, 0xb1, 0x1f, 0x7a, 0xf6, 0xf4, 0x64, 0x00, 0x82, 0x9e, 0xd8, 0x1e, 0x84, 0xc0, 0xa8, - 0x80, 0xa1, 0x15, 0xb3, 0x48, 0x44, 0xf8, 0xf5, 0x14, 0x6e, 0xd1, 0xd8, 0xb7, 0x72, 0xb8, 0xa5, - 0xe1, 0x07, 0xc7, 0x9e, 0x2f, 0x46, 0xc9, 0xc0, 0x72, 0xa3, 0x89, 0xed, 0x45, 0x5e, 0x64, 0x2b, - 0xab, 0x41, 0xf2, 0x54, 0x9d, 0xd4, 0x41, 0xfd, 0x4b, 0xbd, 0x1d, 0x98, 0x85, 0xe0, 0x6e, 0xc4, - 0xc0, 0x9e, 0xae, 0x44, 0x3c, 0x78, 0x90, 0x63, 0x26, 0xd4, 0x1d, 0xf9, 0x21, 0xb0, 0x67, 0x76, - 0x3c, 0xf6, 0xa4, 0x80, 0xdb, 0x13, 0x10, 0xf4, 0x3a, 0x2b, 0xfb, 0x26, 0x2b, 0x96, 0x84, 0xc2, - 0x9f, 0xc0, 0x8a, 0xc1, 0x7b, 0x2f, 0x33, 0xe0, 0xee, 0x08, 0x26, 0x74, 0xc5, 0xee, 0xfe, 0x4d, - 0x76, 0x89, 0xf0, 0x03, 0xdb, 0x0f, 0x05, 0x17, 0x6c, 0xd9, 0xc8, 0xfc, 0xc3, 0x40, 0x77, 0x4f, - 0xfb, 0xfd, 0xee, 0x59, 0xe8, 0x31, 0xe0, 0xbc, 0x4b, 0xc5, 0x08, 0x1f, 0xa2, 0xcd, 0x98, 0x8a, - 0x51, 0xc3, 0x38, 0x34, 0x8e, 0xb6, 0x9d, 0xfa, 0xc5, 0x65, 0x6b, 0x63, 0x76, 0xd9, 0xda, 0x94, - 0x3a, 0xa2, 0x34, 0xf8, 0x01, 0xaa, 0xca, 0xdf, 0xfe, 0xb3, 0x18, 0x1a, 0x65, 0x85, 0x6a, 0xcc, - 0x2e, 0x5b, 0xd5, 0xae, 0x96, 0xfd, 0x53, 0xf8, 0x4f, 0x32, 0x24, 0xfe, 0x12, 0x6d, 0x0d, 0xa8, - 0x3b, 0x86, 0x70, 0xd8, 0x28, 0x1d, 0x1a, 0x47, 0xb5, 0xf6, 0xb1, 0xf5, 0x9f, 0x3d, 0xb4, 0xf4, - 0xa5, 0x9c, 0xd4, 0xc8, 0xb9, 0xab, 0x6f, 0xb2, 0xa5, 0x05, 0x64, 0xee, 0xce, 0x1c, 0xa3, 0xfd, - 0x42, 0x12, 0x24, 0x09, 0xe0, 0x73, 0x1a, 0x24, 0x80, 0x7b, 0xa8, 0x22, 0xa3, 0xf3, 0x86, 0x71, - 0x58, 0x3e, 0xaa, 0xb5, 0xad, 0x97, 0xc4, 0x5b, 0x2a, 0x84, 0xb3, 0xa3, 0x03, 0x56, 0xe4, 0x89, - 0x93, 0xd4, 0x97, 0xf9, 0x63, 0x09, 0x6d, 0x69, 0x14, 0x7e, 0x82, 0xaa, 0xb2, 0xef, 0x43, 0x2a, - 0xa8, 0x2a, 0x57, 0xad, 0xfd, 0x4e, 0x21, 0x46, 0xd6, 0x06, 0x2b, 0x1e, 0x7b, 0x52, 0xc0, 0x2d, - 0x89, 0xb6, 0xa6, 0x27, 0xd6, 0xf9, 0xe0, 0x1b, 0x70, 0xc5, 0xa7, 0x20, 0xa8, 0x83, 0x75, 0x14, - 0x94, 0xcb, 0x48, 
0xe6, 0x15, 0x77, 0xd0, 0x26, 0x8f, 0xc1, 0xd5, 0x15, 0x7b, 0x6b, 0xbd, 0x8a, - 0xf5, 0x62, 0x70, 0xf3, 0xc6, 0xc9, 0x13, 0x51, 0x5e, 0x70, 0x1f, 0xdd, 0xe1, 0x82, 0x8a, 0x84, - 0xab, 0xb6, 0xd5, 0xda, 0x6f, 0xaf, 0xe9, 0x4f, 0xd9, 0x38, 0xbb, 0xda, 0xe3, 0x9d, 0xf4, 0x4c, - 0xb4, 0x2f, 0xf3, 0x87, 0x12, 0xda, 0x5d, 0xec, 0x15, 0x7e, 0x17, 0xd5, 0x38, 0xb0, 0xa9, 0xef, - 0xc2, 0x63, 0x3a, 0x01, 0x4d, 0xa5, 0xd7, 0xb4, 0x7d, 0xad, 0x97, 0xab, 0x48, 0x11, 0x87, 0xbd, - 0xcc, 0xac, 0x1b, 0x31, 0xa1, 0x93, 0xbe, 0xb9, 0xa4, 0x92, 0xd9, 0x56, 0xca, 0x6c, 0xeb, 0x2c, - 0x14, 0xe7, 0xac, 0x27, 0x98, 0x1f, 0x7a, 0x2b, 0x81, 0xa4, 0x33, 0x52, 0xf4, 0x8c, 0xbf, 0x40, - 0x55, 0x06, 0x3c, 0x4a, 0x98, 0x0b, 0xba, 0x14, 0x0b, 0x64, 0x94, 0x23, 0x40, 0xb6, 0x49, 0xf2, - 0x76, 0xd8, 0x89, 0x5c, 0x1a, 0xa4, 0xcd, 0x21, 0xf0, 0x14, 0x18, 0x84, 0x2e, 0x38, 0x75, 0x49, - 0x78, 0xa2, 0x5d, 0x90, 0xcc, 0x99, 0x7c, 0x50, 0x75, 0x5d, 0x8b, 0x87, 0x01, 0x7d, 0x25, 0x14, - 0xf9, 0x6c, 0x81, 0x22, 0xf6, 0x7a, 0x2d, 0x55, 0x97, 0xbb, 0x89, 0x27, 0xe6, 0xef, 0x06, 0xda, - 0x2b, 0x02, 0x3b, 0x3e, 0x17, 0xf8, 0xeb, 0x95, 0x4c, 0xac, 0xf5, 0x32, 0x91, 0xd6, 0x2a, 0x8f, - 0x3d, 0x1d, 0xaa, 0x3a, 0x97, 0x14, 0xb2, 0xe8, 0xa2, 0x8a, 0x2f, 0x60, 0xc2, 0x1b, 0x25, 0xf5, - 0x56, 0xef, 0xdd, 0x22, 0x8d, 0xfc, 0xa1, 0x9e, 0x49, 0x0f, 0x24, 0x75, 0x64, 0xfe, 0x69, 0xa0, - 0x56, 0x11, 0xd6, 0xa5, 0x8c, 0x4e, 0x40, 0x00, 0xe3, 0x59, 0x1b, 0xf1, 0x11, 0xaa, 0xd2, 0xee, - 0xd9, 0x27, 0x2c, 0x4a, 0xe2, 0xf9, 0xbc, 0x93, 0xf7, 0xfb, 0x48, 0xcb, 0x48, 0xa6, 0x95, 0x53, - 0x71, 0xec, 0xeb, 0xd1, 0x55, 0x98, 0x8a, 0x8f, 0xfc, 0x70, 0x48, 0x94, 0x46, 0x22, 0x42, 0x49, - 0xf6, 0xf2, 0x22, 0x42, 0xb1, 0x5c, 0x69, 0x70, 0x0b, 0x55, 0xb8, 0x1b, 0xc5, 0xd0, 0xd8, 0x54, - 0x90, 0x6d, 0x79, 0xe5, 0x9e, 0x14, 0x90, 0x54, 0x8e, 0xef, 0xa1, 0x6d, 0x09, 0xe4, 0x31, 0x75, - 0xa1, 0x51, 0x51, 0xa0, 0x9d, 0xd9, 0x65, 0x6b, 0xfb, 0xf1, 0x5c, 0x48, 0x72, 0xbd, 0xf9, 0xcb, - 0x52, 0x93, 0x64, 0xff, 0x70, 0x1b, 0x21, 0x37, 0x0a, 0x05, 0x8b, 0x82, 0x00, 0x98, 0x4e, 0x29, - 0xa3, 0xcf, 0xc3, 0x4c, 0x43, 0x0a, 0x28, 0x1c, 0x22, 0x14, 0x67, 0xb5, 0xd1, 0x34, 0xfa, 0xe0, - 0x16, 0xf5, 0xbf, 0xa6, 0xb0, 0xce, 0xae, 0x8c, 0x57, 0x50, 0x14, 0x22, 0x98, 0xbf, 0x1a, 0xa8, - 0xa6, 0xed, 0x5f, 0x01, 0xb1, 0x1e, 0x2d, 0x12, 0xeb, 0x8d, 0x35, 0x97, 0xce, 0xf5, 0x9c, 0xfa, - 0x39, 0xbf, 0xba, 0x5c, 0x33, 0xb2, 0xe7, 0xa3, 0x88, 0x8b, 0xe5, 0x5d, 0x79, 0x1a, 0x71, 0x41, - 0x94, 0x06, 0x27, 0x68, 0xcf, 0x5f, 0xda, 0x4b, 0xb7, 0x7b, 0xa9, 0x99, 0x99, 0xd3, 0xd0, 0xee, - 0xf7, 0x96, 0x35, 0x64, 0x25, 0x84, 0x09, 0x68, 0x05, 0x25, 0x07, 0xc5, 0x48, 0x88, 0x58, 0xd7, - 0xf8, 0xfe, 0xfa, 0xdb, 0x30, 0xbf, 0x42, 0x55, 0x65, 0xd7, 0xef, 0x77, 0x89, 0x72, 0x65, 0xfe, - 0x56, 0xca, 0xea, 0xa1, 0xe8, 0xf7, 0x61, 0x96, 0xad, 0x62, 0x86, 0x1a, 0xfe, 0x29, 0xd9, 0xf7, - 0x0b, 0x17, 0xcf, 0x74, 0x64, 0x05, 0x8d, 0xfb, 0xf9, 0x57, 0x82, 0xf1, 0x7f, 0xbe, 0x12, 0x6a, - 0xd7, 0x7d, 0x21, 0xe0, 0x53, 0x54, 0x16, 0xc1, 0x9c, 0x02, 0x6f, 0xae, 0xe7, 0xb1, 0xdf, 0xe9, - 0x39, 0x35, 0x5d, 0xf2, 0x72, 0xbf, 0xd3, 0x23, 0xd2, 0x05, 0x3e, 0x47, 0x15, 0x96, 0x04, 0x20, - 0x37, 0x68, 0x79, 0xfd, 0x8d, 0x2c, 0x2b, 0x98, 0x53, 0x4a, 0x9e, 0x38, 0x49, 0xfd, 0x98, 0xdf, - 0xa2, 0x9d, 0x85, 0x35, 0x8b, 0x9f, 0xa0, 0x7a, 0x10, 0xd1, 0xa1, 0x43, 0x03, 0x1a, 0xba, 0xfa, - 0x11, 0x2f, 0xf1, 0x76, 0xbe, 0x9f, 0x3a, 0x05, 0x9c, 0x5e, 0xd2, 0xfb, 0x3a, 0x48, 0xbd, 0xa8, - 0x23, 0x0b, 0x1e, 0x4d, 0x8a, 0x50, 0x9e, 0xa3, 0x9c, 0x4a, 0x92, 0xa9, 0xe9, 0x57, 0x92, 0x9e, - 0x4a, 0x92, 0xc0, 0x9c, 0xa4, 0x72, 0x39, 
0x53, 0x38, 0xb8, 0x0c, 0x84, 0x6a, 0x67, 0x69, 0x71, - 0xa6, 0xf4, 0x32, 0x0d, 0x29, 0xa0, 0x9c, 0xe3, 0x8b, 0xab, 0xe6, 0xc6, 0xf3, 0xab, 0xe6, 0xc6, - 0x8b, 0xab, 0xe6, 0xc6, 0xf7, 0xb3, 0xa6, 0x71, 0x31, 0x6b, 0x1a, 0xcf, 0x67, 0x4d, 0xe3, 0xc5, - 0xac, 0x69, 0xfc, 0x35, 0x6b, 0x1a, 0x3f, 0xfd, 0xdd, 0xdc, 0xf8, 0x6a, 0x4b, 0x97, 0xe9, 0xdf, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x8b, 0x3b, 0x2e, 0x16, 0x0c, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_9497719c79c89d2d) +} + +var fileDescriptor_9497719c79c89d2d = []byte{ + // 1457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xc5, + 0x1f, 0xcf, 0x3a, 0x71, 0xe3, 0x8c, 0xd3, 0x26, 0x9d, 0x5f, 0x0f, 0xfe, 0x05, 0xd5, 0x8e, 0x16, + 0x09, 0x85, 0x3e, 0x76, 0xdb, 0xb4, 0xa0, 0x72, 0x41, 0xd4, 0x01, 0x51, 0xab, 0x69, 0xb2, 0x8c, + 0x0d, 0x54, 0xc0, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0xc7, 0x81, 0xde, 0x40, + 0x9c, 0x38, 0xc1, 0x9d, 0x23, 0x12, 0x7f, 0x02, 0x70, 0xa0, 0x52, 0x05, 0x97, 0x1e, 0x7b, 0xec, + 0x85, 0x88, 0x9a, 0xff, 0xa2, 0x27, 0xf4, 0x9d, 0x9d, 0x7d, 0xf9, 0xd1, 0x6c, 0x38, 0xe4, 0x54, + 0xef, 0xf7, 0x3d, 0xdf, 0xe7, 0xa7, 0x41, 0x57, 0x07, 0xb7, 0x42, 0xc3, 0xf1, 0x4d, 0x1a, 0x38, + 0xa6, 0xc7, 0xc4, 0x97, 0x3e, 0x1f, 0x38, 0x5e, 0xcf, 0x3c, 0xbc, 0x7e, 0xc0, 0x04, 0xbd, 0x6e, + 0xf6, 0x98, 0xc7, 0x38, 0x15, 0xac, 0x6b, 0x04, 0xdc, 0x17, 0x3e, 0xbe, 0x18, 0x89, 0x1b, 0x34, + 0x70, 0x8c, 0x54, 0xdc, 0x50, 0xe2, 0x1b, 0x57, 0x7b, 0x8e, 0xe8, 0x8f, 0x0e, 0x0c, 0xdb, 0x1f, + 0x9a, 0x3d, 0xbf, 0xe7, 0x9b, 0x52, 0xeb, 0x60, 0xf4, 0x40, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0xb2, + 0xb6, 0xa1, 0x67, 0x9c, 0xdb, 0x3e, 0x67, 0xe6, 0xe1, 0x94, 0xc7, 0x8d, 0x9b, 0xa9, 0xcc, 0x90, + 0xda, 0x7d, 0xc7, 0x63, 0xfc, 0xa1, 0x19, 0x0c, 0x7a, 0x40, 0x08, 0xcd, 0x21, 0x13, 0x74, 0x96, + 0x96, 0x39, 0x4f, 0x8b, 0x8f, 0x3c, 0xe1, 0x0c, 0xd9, 0x94, 0xc2, 0x9b, 0xc7, 0x29, 0x84, 0x76, + 0x9f, 0x0d, 0xe9, 0x94, 0xde, 0x8d, 0x79, 0x7a, 0x23, 0xe1, 0xb8, 0xa6, 0xe3, 0x89, 0x50, 0xf0, + 0x49, 0x25, 0xfd, 0x4f, 0x0d, 0xad, 0xdd, 0xe9, 0x74, 0xac, 0x96, 0xd7, 0xe3, 0x2c, 0x0c, 0x2d, + 0x2a, 0xfa, 0x78, 0x13, 0x2d, 0x05, 0x54, 0xf4, 0x6b, 0xda, 0xa6, 0xb6, 0xb5, 0xd2, 0x5c, 0x7d, + 0x72, 0xd4, 0x58, 0x18, 0x1f, 0x35, 0x96, 0x80, 0x47, 0x24, 0x07, 0xdf, 0x44, 0x15, 0xf8, 0xb7, + 0xf3, 0x30, 0x60, 0xb5, 0x45, 0x29, 0x55, 0x1b, 0x1f, 0x35, 0x2a, 0x96, 0xa2, 0xbd, 0xc8, 0xfc, + 0x26, 0x89, 0x24, 0xbe, 0x8f, 0x96, 0x0f, 0xa8, 0x3d, 0x60, 0x5e, 0xb7, 0x56, 0xda, 0xd4, 0xb6, + 0xaa, 0xdb, 0x57, 0x8d, 0x97, 0xd6, 0xd0, 0x50, 0x41, 0x35, 0x23, 0xa5, 0xe6, 0x9a, 0x8a, 0x64, + 0x59, 0x11, 0x48, 0x6c, 0x4e, 0x1f, 0xa0, 0x0b, 0x99, 0x47, 0x90, 0x91, 0xcb, 0x3e, 0xa2, 0xee, + 0x88, 0xe1, 0x36, 0x2a, 0x83, 0xf7, 0xb0, 0xa6, 0x6d, 0x2e, 0x6e, 0x55, 0xb7, 0x8d, 0x63, 0xfc, + 0x4d, 0x24, 0xa2, 0x79, 0x56, 0x39, 0x2c, 0xc3, 0x57, 0x48, 0x22, 0x5b, 0xfa, 0x23, 0x0d, 0xad, + 0xb4, 0xac, 0xdb, 0xdd, 0x2e, 0xc8, 0xe1, 0xcf, 0x51, 0x05, 0x2a, 0xdf, 0xa5, 0x82, 0xca, 0x84, + 0x55, 0xb7, 0xaf, 0x65, 0xbc, 0x24, 0x85, 0x30, 0x82, 0x41, 0x0f, 0x08, 0xa1, 0x01, 0xd2, 0xc6, + 0xe1, 0x75, 0x63, 0xff, 0xe0, 0x0b, 0x66, 0x8b, 0x7b, 0x4c, 0xd0, 0x26, 0x56, 0x7e, 0x50, 0x4a, + 0x23, 0x89, 0x55, 0xbc, 0x87, 0x96, 0xc2, 0x80, 0xd9, 0x2a, 0x67, 0x57, 0x8e, 0xcb, 0x59, 0x1c, + 0x59, 0x3b, 0x60, 0x76, 0x5a, 0x3c, 0xf8, 0x22, 0xd2, 0x8e, 0xfe, 0xbb, 0x86, 0xce, 0x26, 0x52, + 0xbb, 0x4e, 0x28, 0xf0, 0x67, 0x53, 0x6f, 0x30, 0x8a, 0xbd, 0x01, 0xb4, 0xe5, 
0x0b, 0xd6, 0x95, + 0x9f, 0x4a, 0x4c, 0xc9, 0xc4, 0x7f, 0x0f, 0x95, 0x1d, 0xc1, 0x86, 0x61, 0xad, 0x24, 0x8b, 0xb0, + 0x55, 0xf4, 0x01, 0x69, 0xfa, 0x5b, 0xa0, 0x4e, 0x22, 0x2b, 0xba, 0x9b, 0x89, 0x1e, 0x5e, 0x85, + 0x3f, 0x45, 0x2b, 0x01, 0xe5, 0xcc, 0x13, 0x84, 0x3d, 0x98, 0x11, 0xfe, 0x2c, 0x1f, 0x56, 0x2c, + 0xcf, 0x38, 0xf3, 0x6c, 0xd6, 0x3c, 0x3b, 0x3e, 0x6a, 0xac, 0x24, 0x44, 0x92, 0xda, 0xd3, 0xbf, + 0x2f, 0xa1, 0x65, 0xd5, 0x12, 0xa7, 0x50, 0xea, 0xdd, 0x5c, 0xa9, 0x2f, 0x15, 0x1b, 0x8f, 0x79, + 0x85, 0xc6, 0x1d, 0x74, 0x26, 0x14, 0x54, 0x8c, 0x42, 0x39, 0xa3, 0x05, 0x5a, 0x47, 0xd9, 0x93, + 0x3a, 0xcd, 0x73, 0xca, 0xe2, 0x99, 0xe8, 0x9b, 0x28, 0x5b, 0xfa, 0x77, 0x25, 0x74, 0x2e, 0x3f, + 0x98, 0xf8, 0x0d, 0x54, 0x0d, 0x19, 0x3f, 0x74, 0x6c, 0xb6, 0x47, 0x87, 0x4c, 0xed, 0x8d, 0xff, + 0x29, 0xfd, 0x6a, 0x3b, 0x65, 0x91, 0xac, 0x1c, 0xee, 0x25, 0x6a, 0x96, 0xcf, 0x85, 0x7a, 0xf4, + 0xfc, 0x94, 0xc2, 0x1a, 0x33, 0xa2, 0x35, 0x66, 0xb4, 0x3c, 0xb1, 0xcf, 0xdb, 0x82, 0x3b, 0x5e, + 0x6f, 0xca, 0x11, 0x18, 0x23, 0x59, 0xcb, 0xf8, 0x63, 0x54, 0xe1, 0x2c, 0xf4, 0x47, 0xdc, 0x66, + 0x2a, 0x15, 0xb9, 0xcd, 0x03, 0xfb, 0x1e, 0xca, 0x04, 0x4b, 0xaa, 0xbb, 0xeb, 0xdb, 0xd4, 0x8d, + 0x8a, 0x93, 0xf6, 0xc7, 0x2a, 0xb4, 0x36, 0x51, 0x26, 0x48, 0x62, 0x0c, 0xb6, 0xe7, 0xaa, 0xca, + 0xc5, 0x8e, 0x4b, 0x4f, 0xa5, 0x45, 0x3e, 0xc8, 0xb5, 0x88, 0x59, 0xac, 0xa4, 0x32, 0xb8, 0xb9, + 0x0b, 0xe1, 0x0f, 0x0d, 0xad, 0x67, 0x05, 0x4f, 0x61, 0x27, 0x58, 0xf9, 0x9d, 0x70, 0xf9, 0x04, + 0xcf, 0x98, 0xb3, 0x16, 0xfe, 0xd2, 0x50, 0x23, 0x2b, 0x66, 0x51, 0x4e, 0x87, 0x4c, 0x30, 0x1e, + 0x26, 0x65, 0xc4, 0x5b, 0xa8, 0x42, 0xad, 0xd6, 0xfb, 0xdc, 0x1f, 0x05, 0xf1, 0x71, 0x83, 0xf8, + 0x6e, 0x2b, 0x1a, 0x49, 0xb8, 0x70, 0x02, 0x07, 0x8e, 0xba, 0x53, 0x99, 0x13, 0x78, 0xd7, 0xf1, + 0xba, 0x44, 0x72, 0x40, 0xc2, 0x83, 0x66, 0x5f, 0xcc, 0x4b, 0xc8, 0x2e, 0x97, 0x1c, 0xdc, 0x40, + 0xe5, 0xd0, 0xf6, 0x03, 0x56, 0x5b, 0x92, 0x22, 0x2b, 0x10, 0x72, 0x1b, 0x08, 0x24, 0xa2, 0xe3, + 0xcb, 0x68, 0x05, 0x04, 0xc3, 0x80, 0xda, 0xac, 0x56, 0x96, 0x42, 0x72, 0x11, 0xed, 0xc5, 0x44, + 0x92, 0xf2, 0xf5, 0x5f, 0x26, 0x8a, 0x24, 0x57, 0xdf, 0x36, 0x42, 0xb6, 0xef, 0x09, 0xee, 0xbb, + 0x2e, 0xe3, 0xea, 0x49, 0x49, 0xfb, 0xec, 0x24, 0x1c, 0x92, 0x91, 0xc2, 0x1e, 0x42, 0x41, 0x92, + 0x1b, 0xd5, 0x46, 0x6f, 0x9f, 0x20, 0xff, 0x33, 0x12, 0xdb, 0x3c, 0x07, 0xfe, 0x32, 0x8c, 0x8c, + 0x07, 0xfd, 0x37, 0x0d, 0x55, 0x95, 0xfe, 0x29, 0x34, 0xd6, 0xdd, 0x7c, 0x63, 0xbd, 0x56, 0x10, + 0x61, 0xcc, 0xee, 0xa9, 0x47, 0x1a, 0xda, 0x88, 0x43, 0xf7, 0x69, 0xb7, 0x49, 0x5d, 0xea, 0xd9, + 0x8c, 0xc7, 0xf7, 0x60, 0x03, 0x95, 0x9c, 0xb8, 0x91, 0x90, 0x32, 0x50, 0x6a, 0x59, 0xa4, 0xe4, + 0x04, 0xf8, 0x0a, 0xaa, 0xf4, 0xfd, 0x50, 0xc8, 0x16, 0x89, 0x9a, 0x28, 0x89, 0xfa, 0x8e, 0xa2, + 0x93, 0x44, 0x02, 0x7f, 0x88, 0xca, 0x81, 0xcf, 0x45, 0x58, 0x5b, 0x92, 0x51, 0x5f, 0x2b, 0x16, + 0x35, 0xec, 0x36, 0xb5, 0xac, 0x53, 0xa4, 0x02, 0x66, 0x48, 0x64, 0x4d, 0xff, 0x46, 0x43, 0xff, + 0x9f, 0x11, 0x7f, 0xa4, 0x83, 0xbb, 0x68, 0xd9, 0x89, 0x98, 0x0a, 0x1e, 0xbd, 0x55, 0xcc, 0xed, + 0x8c, 0x54, 0xa4, 0xd0, 0x2c, 0x86, 0x60, 0xb1, 0x69, 0xfd, 0x27, 0x0d, 0x9d, 0x9f, 0x8a, 0x57, + 0x42, 0x4c, 0xd8, 0xf9, 0x90, 0xbc, 0x72, 0x06, 0x62, 0xc2, 0xea, 0x96, 0x1c, 0x7c, 0x17, 0x55, + 0x24, 0x42, 0xb5, 0x7d, 0x57, 0x25, 0xd0, 0x8c, 0x13, 0x68, 0x29, 0xfa, 0x8b, 0xa3, 0xc6, 0x2b, + 0xd3, 0xb0, 0xdd, 0x88, 0xd9, 0x24, 0x31, 0x00, 0xa3, 0xc8, 0x38, 0xf7, 0xb9, 0x9a, 0x56, 0x39, + 0x8a, 0xef, 0x01, 0x81, 0x44, 0x74, 0xfd, 0xe7, 0xb4, 0x49, 0x01, 0x3d, 0x42, 0x7c, 0x50, 0x9c, + 0x49, 
0x08, 0x0c, 0xa5, 0x23, 0x92, 0x83, 0x47, 0x68, 0xdd, 0x99, 0x80, 0x9b, 0x27, 0xdb, 0xc9, + 0x89, 0x5a, 0xb3, 0xa6, 0xcc, 0xaf, 0x4f, 0x72, 0xc8, 0x94, 0x0b, 0x9d, 0xa1, 0x29, 0x29, 0x38, + 0x09, 0x7d, 0x21, 0x02, 0x35, 0x4d, 0x37, 0x8a, 0x83, 0xdc, 0x34, 0x84, 0x8a, 0x7c, 0x5d, 0xa7, + 0x63, 0x11, 0x69, 0x4a, 0x7f, 0x5c, 0x4a, 0xf2, 0x21, 0x17, 0xcd, 0x3b, 0xc9, 0x6b, 0xe5, 0x0e, + 0x90, 0x67, 0x3e, 0x5a, 0x6b, 0x17, 0x32, 0x81, 0x27, 0x3c, 0x32, 0x25, 0x8d, 0x3b, 0x29, 0xf8, + 0xd7, 0xfe, 0x0b, 0xf8, 0xaf, 0xce, 0x02, 0xfe, 0xf8, 0x0e, 0x5a, 0x14, 0x6e, 0x3c, 0xec, 0xaf, + 0x17, 0xb3, 0xd8, 0xd9, 0x6d, 0x37, 0xab, 0x2a, 0xe5, 0x8b, 0x9d, 0xdd, 0x36, 0x01, 0x13, 0x78, + 0x1f, 0x95, 0xf9, 0xc8, 0x65, 0x80, 0x95, 0x16, 0x8b, 0x63, 0x2f, 0xc8, 0x60, 0x3a, 0x7c, 0xf0, + 0x15, 0x92, 0xc8, 0x8e, 0xfe, 0x2d, 0xc0, 0xec, 0x2c, 0xa2, 0xc2, 0x1c, 0xad, 0xba, 0x99, 0xd9, + 0x51, 0x79, 0xb8, 0x75, 0xf2, 0xa9, 0x53, 0x43, 0x7f, 0x41, 0xf9, 0x5d, 0xcd, 0xf2, 0x48, 0xce, + 0x87, 0x4e, 0x11, 0x4a, 0x9f, 0x0d, 0x73, 0x00, 0xcd, 0x1b, 0x0d, 0xbc, 0x9a, 0x03, 0xe8, 0xe9, + 0x90, 0x44, 0x74, 0x38, 0x28, 0x21, 0xb3, 0x39, 0x13, 0x7b, 0xe9, 0xe2, 0x4a, 0x0e, 0x4a, 0x3b, + 0xe1, 0x90, 0x8c, 0x94, 0xfe, 0xab, 0x86, 0xd6, 0x26, 0x00, 0x35, 0x7e, 0x15, 0x95, 0x7b, 0x99, + 0x33, 0x9b, 0x64, 0x28, 0xba, 0xb3, 0x11, 0x0f, 0x76, 0x64, 0x02, 0xcb, 0x26, 0x76, 0xe4, 0x34, + 0xd6, 0xc2, 0x66, 0xf6, 0x5a, 0x46, 0x73, 0x7c, 0x5e, 0x89, 0xcf, 0xbc, 0x98, 0xc9, 0x85, 0x5e, + 0x9a, 0x77, 0xa1, 0xf5, 0x1f, 0x4b, 0x28, 0x06, 0x8d, 0x3b, 0xad, 0x77, 0xc9, 0x29, 0xa0, 0x37, + 0x2b, 0x87, 0xde, 0x8e, 0xfb, 0x6f, 0x4a, 0x26, 0xb6, 0xb9, 0x20, 0xff, 0xfe, 0x04, 0xc8, 0xbf, + 0x76, 0x02, 0x9b, 0x2f, 0x07, 0xfa, 0x8f, 0x35, 0xb4, 0x96, 0x91, 0x3e, 0x85, 0xe3, 0xbd, 0x9f, + 0x3f, 0xde, 0x97, 0x8a, 0x3f, 0x65, 0xce, 0x01, 0xdf, 0xce, 0xbd, 0x40, 0x6e, 0xb2, 0x06, 0x2a, + 0xdb, 0x4e, 0x97, 0xe7, 0x46, 0x00, 0x98, 0x21, 0x89, 0xe8, 0xfa, 0x57, 0xe8, 0xfc, 0x54, 0x8e, + 0xb0, 0x2d, 0x81, 0x56, 0xd7, 0x11, 0x8e, 0xef, 0xc5, 0xe7, 0xd2, 0x2c, 0xf6, 0xf2, 0x9d, 0x58, + 0x2f, 0x87, 0xcc, 0x94, 0x29, 0x92, 0x31, 0xdb, 0xdc, 0x79, 0xf2, 0xbc, 0xbe, 0xf0, 0xf4, 0x79, + 0x7d, 0xe1, 0xd9, 0xf3, 0xfa, 0xc2, 0xd7, 0xe3, 0xba, 0xf6, 0x64, 0x5c, 0xd7, 0x9e, 0x8e, 0xeb, + 0xda, 0xb3, 0x71, 0x5d, 0xfb, 0x7b, 0x5c, 0xd7, 0x7e, 0xf8, 0xa7, 0xbe, 0xf0, 0xc9, 0xc5, 0x97, + 0xfe, 0x99, 0xec, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xc2, 0xa4, 0xff, 0x46, 0x13, 0x00, + 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -611,7 +956,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Ingress) Marshal() (dAtA []byte, err error) { +func (m *IPAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -621,26 +966,16 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { 
@@ -664,7 +999,7 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressBackend) Marshal() (dAtA []byte, err error) { +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -674,30 +1009,32 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x1a } { - size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -705,16 +1042,11 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IngressClass) Marshal() (dAtA []byte, err error) { +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -724,40 +1056,32 @@ func (m *IngressClass) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.ParentRef != nil { + { + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IngressClassList) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -767,12 +1091,158 @@ func (m *IngressClassList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -945,6 +1415,128 @@ func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *IngressLoadBalancerIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressLoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressLoadBalancerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressLoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressLoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *IngressPortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressPortStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressPortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + i -= len(*m.Error) + copy(dAtA[i:], *m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1158,21 +1750,233 @@ func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParentReference) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Path) @@ -1201,6 +2005,49 @@ func (m *HTTPIngressRuleValue) Size() (n int) { return n } +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Ingress) Size() (n int) { if m == nil { return 0 @@ -1320,51 +2167,101 @@ func (m *IngressList) Size() (n int) { return n } -func (m *IngressRule) Size() (n int) { +func (m *IngressLoadBalancerIngress) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Host) + l = len(m.IP) n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() + l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } -func (m *IngressRuleValue) Size() (n int) { +func (m *IngressLoadBalancerStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *IngressSpec) Size() (n int) { +func (m *IngressPortStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Backend != nil { - l = m.Backend.Size() + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.Error != nil { + l = len(*m.Error) n += 1 + l + sovGenerated(uint64(l)) } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + return n +} + +func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } @@ -1403,6 +2300,85 @@ func (m *IngressTLS) Size() (n int) { return n } +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceCIDR) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceCIDRList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func sovGenerated(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1436,6 +2412,43 @@ func (this *HTTPIngressRuleValue) String() string { }, "") return s } +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *Ingress) String() string { if this == nil { return "nil" @@ -1528,6 +2541,50 @@ 
func (this *IngressList) String() string { }, "") return s } +func (this *IngressLoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]IngressPortStatus{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&IngressLoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *IngressLoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]IngressLoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&IngressLoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *IngressPortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressPortStatus{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Error:` + valueToStringGenerated(this.Error) + `,`, + `}`, + }, "") + return s +} func (this *IngressRule) String() string { if this == nil { return "nil" @@ -1577,7 +2634,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1586,22 +2643,1172 @@ func (this *IngressTLS) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *ServiceCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
PathType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + 
} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IngressClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1624,15 +3831,15 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1660,13 +3867,14 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1676,28 +3884,27 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1725,8 +3932,73 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -1749,7 +4021,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1772,15 +4044,47 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA 
[]byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1807,8 +4111,10 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Parameters == nil { + m.Parameters = &IngressClassParametersReference{} + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1833,7 +4139,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1856,15 +4162,15 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1891,46 +4197,13 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1957,7 +4230,8 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1982,7 +4256,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2005,15 +4279,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2041,13 +4315,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServiceName = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2057,28 +4331,27 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2105,10 +4378,8 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } 
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2133,7 +4404,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClass) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2156,15 +4427,15 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2191,15 +4462,85 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2209,24 +4550,56 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift 
if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2249,7 +4622,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassList) Unmarshal(dAtA []byte) error { +func (m *IngressRule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2272,15 +4645,47 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2304,16 +4709,66 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2340,8 +4795,10 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IngressClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2366,7 +4823,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2389,17 +4846,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2409,30 +4866,33 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.APIGroup = &s + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2442,29 +4902,31 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2474,27 +4936,29 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2523,13 +4987,63 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) - m.Scope = &s + m.IngressClassName = &s iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } - var stringLen uint64 + var msglen 
int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2539,24 +5053,24 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2579,7 +5093,7 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { +func (m *IngressTLS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2602,15 +5116,15 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2638,13 +5152,13 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Controller = string(dAtA[iNdEx:postIndex]) + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2654,27 +5168,23 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parameters == nil { - m.Parameters = &IngressClassParametersReference{} - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2697,7 +5207,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *ParentReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2720,17 +5230,81 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2740,30 +5314,29 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2773,25 +5346,23 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2814,7 +5385,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2837,17 +5408,17 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2857,27 +5428,28 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2904,63 +5476,13 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2987,10 +5509,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3015,7 +5534,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3038,15 +5557,15 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3073,50 +5592,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3143,44 +5625,11 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ServiceCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3202,7 +5651,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3225,17 +5674,17 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3245,24 +5694,23 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -3285,7 +5733,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3308,17 +5756,17 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3328,55 +5776,25 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto index ef8cceff4..3368dcaec 100644 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -28,19 +28,19 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/networking/v1beta1"; // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -57,7 +57,7 @@ message HTTPIngressPath { // Defaults to ImplementationSpecific. optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; } @@ -68,10 +68,49 @@ message HTTPIngressPath { // to match against everything after the last '/' and before the first '?' // or '#'. message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. + // +listType=atomic repeated HTTPIngressPath paths = 1; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. 
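For orientation, every generated Unmarshal method in the generated.pb.go hunks above repeats the same hand-rolled protobuf wire-format loop: read a base-128 varint tag, split it into field number and wire type, then consume either another varint or a length-delimited payload. A minimal standalone sketch of the varint step follows; the helper name and sample bytes are illustrative, not code from this vendor tree.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a protobuf base-128 varint from data, mirroring the
// shift-by-7 loops in the generated Unmarshal methods above.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	var shift uint
	for {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return value, n, nil
		}
		shift += 7
	}
}

func main() {
	// 0x96 0x01 encodes the value 150 in two bytes.
	v, n, _ := decodeVarint([]byte{0x96, 0x01})
	fmt.Println(v, n) // 150 2

	// A tag byte of 0x0A decodes to field number 1, wire type 2 (length-delimited).
	tag, _, _ := decodeVarint([]byte{0x0A})
	fmt.Printf("field=%d wireType=%d\n", tag>>3, tag&0x7) // field=1 wireType=2
}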
+// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + optional ParentReference parentRef = 1; +} + // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name @@ -80,14 +119,14 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; @@ -95,19 +134,19 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional optional string serviceName = 1; - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. 
// +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -119,9 +158,9 @@ message IngressClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressClassSpec spec = 2; @@ -131,32 +170,32 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of IngressClasses. + // items is the list of IngressClasses. repeated IngressClass items = 2; } // IngressClassParametersReference identifies an API object. This can be used // to specify a cluster or namespace-scoped resource. message IngressClassParametersReference { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional optional string aPIGroup = 1; - // Kind is the type of resource being referenced. + // kind is the type of resource being referenced. optional string kind = 2; - // Name is the name of resource being referenced. + // name is the name of resource being referenced. optional string name = 3; - // Scope represents if this refers to a cluster or namespace scoped resource. + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". optional string scope = 4; - // Namespace is the namespace of the resource being referenced. This field is + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -165,15 +204,15 @@ message IngressClassParametersReference { // IngressClassSpec provides information about the class of an Ingress. message IngressClassSpec { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. optional string controller = 1; - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. 
// +optional @@ -185,17 +224,65 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of Ingress. + // items is the list of Ingress. repeated Ingress items = 2; } +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +message IngressLoadBalancerIngress { + // ip is set for load-balancer ingress points that are IP based. + // +optional + optional string ip = 1; + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + optional string hostname = 2; + + // ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + repeated IngressPortStatus ports = 4; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message IngressLoadBalancerStatus { + // ingress is a list containing ingress points for the load-balancer. + // +optional + // +listType=atomic + repeated IngressLoadBalancerIngress ingress = 1; +} + +// IngressPortStatus represents the error condition of a service port +message IngressPortStatus { + // port is the port number of the ingress port. + optional int32 port = 1; + + // protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + optional string protocol = 2; + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + optional string error = 3; +} + // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -208,7 +295,7 @@ message IngressRule { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -240,7 +327,7 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. 
message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -253,44 +340,47 @@ message IngressSpec { // +optional optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional optional IngressBackend backend = 1; - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic repeated IngressTLS tls = 2; - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic repeated IngressRule rules = 3; } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; + optional IngressLoadBalancerStatus loadBalancer = 1; } // IngressTLS describes the transport layer security associated with an Ingress. message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional + // +listType=atomic repeated string hosts = 1; - // SecretName is the name of the secret used to terminate TLS traffic on + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination @@ -299,3 +389,74 @@ message IngressTLS { optional string secretName = 2; } +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. 
+ // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; +} + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +message ServiceCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRSpec spec = 2; + + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRStatus status = 3; +} + +// ServiceCIDRList contains a list of ServiceCIDR objects. +message ServiceCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ServiceCIDRs. + repeated ServiceCIDR items = 2; +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +message ServiceCIDRSpec { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + repeated string cidrs = 1; +} + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +message ServiceCIDRStatus { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; +} + diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go index 04234953e..9d2a13cc6 100644 --- a/vendor/k8s.io/api/networking/v1beta1/register.go +++ b/vendor/k8s.io/api/networking/v1beta1/register.go @@ -51,6 +51,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, + &IPAddress{}, + &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, ) // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go index 1bfdcd091..cd7126a5a 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -34,17 +34,18 @@ import ( // based virtual hosting etc. type Ingress struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the Ingress. + // spec is the desired state of the Ingress. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status is the current state of the Ingress. + // status is the current state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` @@ -58,18 +59,19 @@ type Ingress struct { // IngressList is a collection of Ingress. type IngressList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of Ingress. + // items is the list of Ingress. Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` } // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The + // ingressClassName is the name of the IngressClass cluster resource. The // associated IngressClass defines which controller will implement the // resource. This replaces the deprecated `kubernetes.io/ingress.class` // annotation. For backwards compatibility, when that annotation is set, it @@ -82,37 +84,41 @@ type IngressSpec struct { // +optional IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any + // backend is the default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to // specify a global default. // +optional Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified + // tls represents the TLS configuration. Currently the Ingress only supports a + // single TLS port, 443. If multiple members of this list specify different hosts, + // they will be multiplexed on the same port according to the hostname specified // through the SNI TLS extension, if the ingress controller fulfilling the // ingress supports SNI. // +optional + // +listType=atomic TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - // A list of host rules used to configure the Ingress. If unspecified, or + // rules is a list of host rules used to configure the Ingress. If unspecified, or // no rule matches, all traffic is sent to the default backend. // +optional + // +listType=atomic Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` // TODO: Add the ability to specify load-balancer IP through claims } // IngressTLS describes the transport layer security associated with an Ingress. type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in + // hosts is a list of hosts included in the TLS certificate. The values in // this list must match the name/s used in the tlsSecret. 
Defaults to the // wildcard host setting for the loadbalancer controller fulfilling this // Ingress, if left unspecified. // +optional + // +listType=atomic Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on + + // secretName is the name of the secret used to terminate TLS traffic on // port 443. Field is left optional to allow TLS routing based on SNI // hostname alone. If the SNI host in a listener conflicts with the "Host" // header field used by an IngressRule, the SNI host is used for termination @@ -122,18 +128,66 @@ type IngressTLS struct { // TODO: Consider specifying different modes of termination, protocols etc. } -// IngressStatus describe the current state of the Ingress. +// IngressStatus describes the current state of the Ingress. type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. + // loadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type IngressLoadBalancerStatus struct { + // ingress is a list containing ingress points for the load-balancer. // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` + // +listType=atomic + Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// IngressLoadBalancerIngress represents the status of a load-balancer ingress point. +type IngressLoadBalancerIngress struct { + // ip is set for load-balancer ingress points that are IP based. + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // hostname is set for load-balancer ingress points that are DNS based. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + + // ports provides information about the ports exposed by this LoadBalancer. + // +listType=atomic + // +optional + Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"` +} + +// IngressPortStatus represents the error condition of a service port +type IngressPortStatus struct { + // port is the port number of the ingress port. + Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` + + // protocol is the protocol of the ingress port. + // The supported values are: "TCP", "UDP", "SCTP" + Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // error is to record the problem with the service port + // The format of the error shall comply with the following rules: + // - built-in error values shall be specified in this file and those shall use + // CamelCase names + // - cloud provider specific error values must have names that comply with the + // format foo.example.com/CamelCase. + // --- + // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } // IngressRule represents the rules mapping the paths under a specified host to // the related backend services. 
Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // host is the fully qualified domain name of a network host, as defined by RFC 3986. // Note the following deviations from the "host" part of the // URI as defined in RFC 3986: // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to @@ -146,7 +200,7 @@ type IngressRule struct { // IngressRuleValue. If the host is unspecified, the Ingress routes all // traffic based on the specified IngressRuleValue. // - // Host can be "precise" which is a domain name without the terminating dot of + // host can be "precise" which is a domain name without the terminating dot of // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name // prefixed with a single wildcard label (e.g. "*.foo.com"). // The wildcard character '*' must appear by itself as the first DNS label and @@ -157,13 +211,14 @@ type IngressRule struct { // is to equal to the suffix (removing the first label) of the wildcard rule. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. // If unspecified, the rule defaults to a http catch-all. Whether that sends // just traffic matching the host to the default backend or all traffic to the // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -187,7 +242,8 @@ type IngressRuleValue struct { // to match against everything after the last '/' and before the first '?' // or '#'. type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. + // paths is a collection of paths that map requests to backends. + // +listType=atomic Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` // TODO: Consider adding fields for ingress-type specific global // options usable by a loadbalancer, like http keep-alive. @@ -226,14 +282,14 @@ const ( // HTTPIngressPath associates a path with a backend. Incoming urls matching the // path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can + // path is matched against the path of an incoming request. Currently it can // contain characters disallowed from the conventional "path" part of a URL // as defined by RFC 3986. Paths must begin with a '/' and must be present // when using PathType with value "Exact" or "Prefix". // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can + // pathType determines the interpretation of the path matching. PathType can // be one of the following values: // * Exact: Matches the URL path exactly. // * Prefix: Matches based on a URL path prefix split by '/'. Matching is @@ -250,22 +306,22 @@ type HTTPIngressPath struct { // Defaults to ImplementationSpecific. 
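Side note for consumers of this bump: the IngressStatus change shown above swaps core/v1.LoadBalancerStatus for the package-local IngressLoadBalancerStatus, so callers that walk ingress load-balancer status need a small adjustment. A minimal sketch against the vendored types shown in this hunk; the helper name is illustrative, not part of the change:

    package example

    import (
    	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    )

    // ingressAddresses collects the externally visible endpoints an Ingress
    // reports, reading the new IngressLoadBalancerStatus/IngressLoadBalancerIngress
    // types rather than core/v1.LoadBalancerStatus.
    func ingressAddresses(ing *networkingv1beta1.Ingress) []string {
    	var addrs []string
    	for _, lb := range ing.Status.LoadBalancer.Ingress {
    		switch {
    		case lb.IP != "":
    			addrs = append(addrs, lb.IP)
    		case lb.Hostname != "":
    			addrs = append(addrs, lb.Hostname)
    		}
    	}
    	return addrs
    }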
PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic + // backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` } // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { - // Specifies the name of the referenced service. + // serviceName specifies the name of the referenced service. // +optional ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` - // Specifies the port of the referenced service. + // servicePort Specifies the port of the referenced service. // +optional ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - // Resource is an ObjectRef to another Kubernetes resource in the namespace + // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional @@ -286,12 +342,13 @@ type IngressBackend struct { // resources without a class specified will be assigned this default class. type IngressClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is the desired state of the IngressClass. + // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -299,15 +356,15 @@ type IngressClass struct { // IngressClassSpec provides information about the class of an Ingress. type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this + // controller refers to the name of the controller that should handle this // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the + // same controller. For example, you may have different parameters for the // same implementing controller. This should be specified as a // domain-prefixed path no more than 250 characters in length, e.g. // "acme.io/ingress-controller". This field is immutable. Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - // Parameters is a link to a custom resource containing additional + // parameters is a link to a custom resource containing additional // configuration for the controller. This is optional if the controller does // not require extra parameters. // +optional @@ -318,7 +375,7 @@ const ( // IngressClassParametersReferenceScopeNamespace indicates that the // referenced Parameters resource is namespace-scoped. IngressClassParametersReferenceScopeNamespace = "Namespace" - // IngressClassParametersReferenceScopeNamespace indicates that the + // IngressClassParametersReferenceScopeCluster indicates that the // referenced Parameters resource is cluster-scoped. IngressClassParametersReferenceScopeCluster = "Cluster" ) @@ -326,19 +383,23 @@ const ( // IngressClassParametersReference identifies an API object. 
This can be used // to specify a cluster or namespace-scoped resource. type IngressClassParametersReference struct { - // APIGroup is the group for the resource being referenced. If APIGroup is + // apiGroup is the group for the resource being referenced. If APIGroup is // not specified, the specified Kind must be in the core API group. For any // other third-party types, APIGroup is required. // +optional APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"` - // Kind is the type of resource being referenced. + + // kind is the type of resource being referenced. Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced. + + // name is the name of resource being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Scope represents if this refers to a cluster or namespace scoped resource. + + // scope represents if this refers to a cluster or namespace scoped resource. // This may be set to "Cluster" (default) or "Namespace". Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"` - // Namespace is the namespace of the resource being referenced. This field is + + // namespace is the namespace of the resource being referenced. This field is // required when scope is set to "Namespace" and must be unset when scope is set to // "Cluster". // +optional @@ -357,6 +418,136 @@ type IngressClassList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of IngressClasses. + // items is the list of IngressClasses. Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. 
+ // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. + ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. 
+ // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index 79c42a6ba..9d27517f3 100644 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -24,14 +24,14 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_HTTPIngressPath = map[string]string{ "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "path": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "pathType": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. 
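Side note for consumers of this bump: ServiceCIDR and IPAddress are new to networking/v1beta1 in this vendor update and nothing in kube-mgmt consumes them yet. A hedged sketch of how caller code could inspect the new objects, assuming only the vendored types above and the stock apimachinery condition helpers; the function names and CIDR values are illustrative:

    package example

    import (
    	networkingv1beta1 "k8s.io/api/networking/v1beta1"
    	apimeta "k8s.io/apimachinery/pkg/api/meta"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // serviceCIDRReady reports whether the apiserver has accepted a ServiceCIDR,
    // using the Ready condition constant defined alongside the new type.
    func serviceCIDRReady(cidr *networkingv1beta1.ServiceCIDR) bool {
    	return apimeta.IsStatusConditionTrue(cidr.Status.Conditions, networkingv1beta1.ServiceCIDRConditionReady)
    }

    // exampleServiceCIDR shows the shape of the new object: at most two CIDRs,
    // one per IP family, and the spec is immutable once set.
    func exampleServiceCIDR() *networkingv1beta1.ServiceCIDR {
    	return &networkingv1beta1.ServiceCIDR{
    		ObjectMeta: metav1.ObjectMeta{Name: "extra-service-range"},
    		Spec: networkingv1beta1.ServiceCIDRSpec{
    			CIDRs: []string{"10.96.0.0/16", "2001:db8::/64"},
    		},
    	}
    }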
* Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", + "backend": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -40,18 +40,47 @@ func (HTTPIngressPath) SwaggerDoc() map[string]string { var map_HTTPIngressRuleValue = map[string]string{ "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", + "paths": "paths is a collection of paths that map requests to backends.", } func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. 
An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -60,9 +89,9 @@ func (Ingress) SwaggerDoc() map[string]string { var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", + "serviceName": "serviceName specifies the name of the referenced service.", + "servicePort": "servicePort Specifies the port of the referenced service.", + "resource": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -72,7 +101,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressClass = map[string]string{ "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (IngressClass) SwaggerDoc() map[string]string { @@ -82,7 +111,7 @@ func (IngressClass) SwaggerDoc() map[string]string { var map_IngressClassList = map[string]string{ "": "IngressClassList is a collection of IngressClasses.", "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", + "items": "items is the list of IngressClasses.", } func (IngressClassList) SwaggerDoc() map[string]string { @@ -91,11 +120,11 @@ func (IngressClassList) SwaggerDoc() map[string]string { var map_IngressClassParametersReference = map[string]string{ "": "IngressClassParametersReference identifies an API object. 
This can be used to specify a cluster or namespace-scoped resource.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced.", - "name": "Name is the name of resource being referenced.", - "scope": "Scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", - "namespace": "Namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "apiGroup": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "kind is the type of resource being referenced.", + "name": "name is the name of resource being referenced.", + "scope": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "namespace": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", } func (IngressClassParametersReference) SwaggerDoc() map[string]string { @@ -104,8 +133,8 @@ func (IngressClassParametersReference) SwaggerDoc() map[string]string { var map_IngressClassSpec = map[string]string{ "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "controller": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", } func (IngressClassSpec) SwaggerDoc() map[string]string { @@ -115,16 +144,47 @@ func (IngressClassSpec) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", + "items": "items is the list of Ingress.", } func (IngressList) SwaggerDoc() map[string]string { return map_IngressList } +var map_IngressLoadBalancerIngress = map[string]string{ + "": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "ip": "ip is set for load-balancer ingress points that are IP based.", + "hostname": "hostname is set for load-balancer ingress points that are DNS based.", + "ports": "ports provides information about the ports exposed by this LoadBalancer.", +} + +func (IngressLoadBalancerIngress) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerIngress +} + +var map_IngressLoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "ingress is a list containing ingress points for the load-balancer.", +} + +func (IngressLoadBalancerStatus) SwaggerDoc() map[string]string { + return map_IngressLoadBalancerStatus +} + +var map_IngressPortStatus = map[string]string{ + "": "IngressPortStatus represents the error condition of a service port", + "port": "port is the port number of the ingress port.", + "protocol": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "error": "error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", +} + +func (IngressPortStatus) SwaggerDoc() map[string]string { + return map_IngressPortStatus +} + var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. 
If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -141,10 +201,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { var map_IngressSpec = map[string]string{ "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "ingressClassName": "ingressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. 
The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "backend is the default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -152,8 +212,8 @@ func (IngressSpec) SwaggerDoc() map[string]string { } var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", + "": "IngressStatus describes the current state of the Ingress.", + "loadBalancer": "loadBalancer contains the current status of the load-balancer.", } func (IngressStatus) SwaggerDoc() map[string]string { @@ -162,12 +222,63 @@ func (IngressStatus) SwaggerDoc() map[string]string { var map_IngressTLS = map[string]string{ "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", + "hosts": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. 
If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", } func (IngressTLS) SwaggerDoc() map[string]string { return map_IngressTLS } +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go new file mode 100644 index 000000000..bc2207766 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index 77259e368..1a6869cd6 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -71,6 +72,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
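A minimal sketch of how the well-known IPAddress labels introduced above might be used to build a label selector for filtering IPAddress objects by family and managing controller. The family value "IPv4" and the manager name "example-controller" are illustrative assumptions, not values defined by this package; only the two label keys come from the vendored code.

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Build a selector over the new well-known IPAddress labels.
	sel := labels.Set{
		networkingv1beta1.LabelIPAddressFamily: "IPv4",                // assumed value for the example
		networkingv1beta1.LabelManagedBy:       "example-controller", // hypothetical controller name
	}.AsSelector()

	// The resulting string can be passed as metav1.ListOptions.LabelSelector
	// when listing IPAddress objects.
	fmt.Println(sel.String())
}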
+func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Ingress) DeepCopyInto(out *Ingress) { *out = *in @@ -266,6 +348,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]IngressPortStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress. +func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress { + if in == nil { + return nil + } + out := new(IngressLoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]IngressLoadBalancerIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus. +func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus { + if in == nil { + return nil + } + out := new(IngressLoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) { + *out = *in + if in.Error != nil { + in, out := &in.Error, &out.Error + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus. +func (in *IngressPortStatus) DeepCopy() *IngressPortStatus { + if in == nil { + return nil + } + out := new(IngressPortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressRule) DeepCopyInto(out *IngressRule) { *out = *in @@ -381,3 +530,124 @@ func (in *IngressTLS) DeepCopy() *IngressTLS { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. +func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { + if in == nil { + return nil + } + out := new(ServiceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { + if in == nil { + return nil + } + out := new(ServiceCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { + if in == nil { + return nil + } + out := new(ServiceCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. 
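A short sketch of the deep-copy semantics provided by the generated functions above: mutating a copy returned by DeepCopy does not leak back into the original ServiceCIDR, because nested slices are duplicated. The object name and CIDR values are illustrative only.

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &networkingv1beta1.ServiceCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example"}, // hypothetical name
		Spec: networkingv1beta1.ServiceCIDRSpec{
			CIDRs: []string{"10.96.0.0/16"},
		},
	}

	// DeepCopy duplicates the Spec.CIDRs slice, so edits to the copy
	// leave the original object untouched.
	cp := orig.DeepCopy()
	cp.Spec.CIDRs[0] = "192.168.0.0/24"

	fmt.Println(orig.Spec.CIDRs[0]) // still "10.96.0.0/16"
	fmt.Println(cp.Spec.CIDRs[0])   // "192.168.0.0/24"
}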
+func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { + if in == nil { + return nil + } + out := new(ServiceCIDRStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go index e8b4c7ec7..a876fd5fe 100644 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,42 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
func (in *Ingress) APILifecycleIntroduced() (major, minor int) { @@ -120,3 +156,39 @@ func (in *IngressList) APILifecycleReplacement() schema.GroupVersionKind { func (in *IngressList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/node/v1/doc.go b/vendor/k8s.io/api/node/v1/doc.go index 12cbcb8a0..57ca52445 100644 --- a/vendor/k8s.io/api/node/v1/doc.go +++ b/vendor/k8s.io/api/node/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=node.k8s.io package v1 // import "k8s.io/api/node/v1" diff --git a/vendor/k8s.io/api/node/v1/generated.pb.go b/vendor/k8s.io/api/node/v1/generated.pb.go index d930f63b2..4c304f55f 100644 --- a/vendor/k8s.io/api/node/v1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
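A brief sketch of querying the prerelease-lifecycle metadata generated above at runtime; the printed versions are exactly the values encoded in the APILifecycle* methods for the new v1beta1 IPAddress type (introduced 1.31, deprecated 1.34, no longer served 1.37).

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	ip := &networkingv1beta1.IPAddress{}

	major, minor := ip.APILifecycleIntroduced()
	fmt.Printf("IPAddress introduced in %d.%d\n", major, minor) // 1.31

	major, minor = ip.APILifecycleDeprecated()
	fmt.Printf("IPAddress deprecated in %d.%d\n", major, minor) // 1.34

	major, minor = ip.APILifecycleRemoved()
	fmt.Printf("IPAddress no longer served in %d.%d\n", major, minor) // 1.37
}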
-// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto +// source: k8s.io/api/node/v1/generated.proto package v1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{0} + return fileDescriptor_9007436710e7565b, []int{0} } func (m *Overhead) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_Overhead proto.InternalMessageInfo func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } func (*RuntimeClass) ProtoMessage() {} func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{1} + return fileDescriptor_9007436710e7565b, []int{1} } func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } func (*RuntimeClassList) ProtoMessage() {} func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{2} + return fileDescriptor_9007436710e7565b, []int{2} } func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo func (m *Scheduling) Reset() { *m = Scheduling{} } func (*Scheduling) ProtoMessage() {} func (*Scheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_6ac9be560e26ae98, []int{3} + return fileDescriptor_9007436710e7565b, []int{3} } func (m *Scheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -170,52 +170,52 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1/generated.proto", fileDescriptor_6ac9be560e26ae98) + proto.RegisterFile("k8s.io/api/node/v1/generated.proto", fileDescriptor_9007436710e7565b) } -var fileDescriptor_6ac9be560e26ae98 = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xce, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x94, 0xa3, 0x43, 0x14, 0x21, 0x27, 0xca, 0x14, 0x90, - 0x7a, 0x6e, 0x2b, 0x84, 0x2a, 0x18, 0x90, 0x0c, 0xad, 0x40, 0x82, 0x02, 0x2e, 0x2c, 0x88, 0x81, - 0x8b, 0xfd, 0x70, 0xdc, 0xc4, 0xbe, 0xe8, 0x7c, 0x8e, 0xc8, 0x86, 0x58, 0x90, 0x98, 0xfa, 0x5f, - 0x18, 0xf8, 0x0b, 0x15, 0x53, 0xc7, 0x4e, 0x2d, 0x0d, 0xff, 0x82, 0x09, 0xdd, 0xd9, 0x4e, 0x5c, - 0x12, 0x42, 0xd9, 0xee, 0xde, 0x7d, 0xdf, 0xf7, 0xde, 0xfb, 0xde, 0x3d, 0x7c, 0xbf, 0xbb, 0x1d, - 0x51, 0x9f, 0x9b, 0xdd, 0xb8, 0x0d, 0x22, 0x04, 0x09, 0x91, 0x39, 0x80, 0xd0, 0xe5, 0xc2, 0x4c, - 0x1f, 0x58, 0xdf, 0x37, 0x43, 0xee, 0x82, 0x39, 0xd8, 0x34, 0x3d, 0x08, 0x41, 0x30, 0x09, 0x2e, - 0xed, 0x0b, 0x2e, 0x39, 0x21, 0x09, 0x86, 0xb2, 0xbe, 0x4f, 0x15, 0x86, 0x0e, 0x36, 0x6b, 0xeb, - 0x9e, 0x2f, 0x3b, 0x71, 0x9b, 0x3a, 0x3c, 0x30, 0x3d, 0xee, 0x71, 0x53, 0x43, 0xdb, 0xf1, 0x7b, - 0x7d, 0xd3, 0x17, 0x7d, 0x4a, 0x24, 0x6a, 0xcd, 0x5c, 0x1a, 0x87, 0x8b, 0x59, 0x69, 0x6a, 0x77, - 0x26, 0x98, 0x80, 0x39, 0x1d, 0x3f, 0x04, 0x31, 0x34, 0xfb, 0x5d, 0x4f, 0x93, 0x04, 0x44, 0x3c, - 0x16, 0x0e, 0xfc, 0x17, 0x2b, 0x32, 0x03, 0x90, 0x6c, 0x56, 0x2e, 0xf3, 0x6f, 0x2c, 0x11, 0x87, - 0xd2, 0x0f, 0xa6, 0xd3, 0xdc, 0xfd, 0x17, 0x21, 0x72, 0x3a, 0x10, 0xb0, 0x3f, 0x79, 0xcd, 0xef, - 0x45, 0x5c, 0x7a, 0x3e, 0x00, 0xd1, 0x01, 0xe6, 0x92, 0x63, 
0x84, 0x4b, 0x7d, 0xee, 0xee, 0xfa, - 0x1f, 0xc0, 0xad, 0xa2, 0xc6, 0x42, 0xab, 0xbc, 0x75, 0x9b, 0x4e, 0x9b, 0x4b, 0x33, 0x02, 0x7d, - 0x91, 0x82, 0x77, 0x42, 0x29, 0x86, 0xd6, 0x67, 0x74, 0x74, 0x5a, 0x2f, 0x8c, 0x4e, 0xeb, 0xa5, - 0x2c, 0xfe, 0xeb, 0xb4, 0x5e, 0x9f, 0x76, 0x96, 0xda, 0xa9, 0x59, 0x4f, 0xfd, 0x48, 0x7e, 0x3a, - 0x9b, 0x0b, 0xd9, 0x63, 0x01, 0x7c, 0x39, 0xab, 0xaf, 0x5f, 0xc6, 0x7b, 0xfa, 0x32, 0x66, 0xa1, - 0xf4, 0xe5, 0xd0, 0x1e, 0x77, 0x51, 0xeb, 0xe2, 0x95, 0x0b, 0x45, 0x92, 0x55, 0xbc, 0xd0, 0x85, - 0x61, 0x15, 0x35, 0x50, 0x6b, 0xd9, 0x56, 0x47, 0xf2, 0x08, 0x2f, 0x0e, 0x58, 0x2f, 0x86, 0x6a, - 0xb1, 0x81, 0x5a, 0xe5, 0x2d, 0x9a, 0xeb, 0x78, 0x9c, 0x8b, 0xf6, 0xbb, 0x9e, 0xb6, 0x60, 0x3a, - 0x57, 0x42, 0xbe, 0x57, 0xdc, 0x46, 0xcd, 0xaf, 0x45, 0x5c, 0xb1, 0x13, 0xbf, 0x1f, 0xf6, 0x58, - 0x14, 0x91, 0x77, 0xb8, 0xa4, 0x26, 0xec, 0x32, 0xc9, 0x74, 0xc6, 0xf2, 0xd6, 0xc6, 0x3c, 0xf5, - 0x88, 0x2a, 0xb4, 0x76, 0xb8, 0x7d, 0x00, 0x8e, 0x7c, 0x06, 0x92, 0x59, 0x24, 0x35, 0x15, 0x4f, - 0x62, 0xf6, 0x58, 0x95, 0xdc, 0xc2, 0x4b, 0x1d, 0x16, 0xba, 0x3d, 0x10, 0xba, 0xfc, 0x65, 0xeb, - 0x5a, 0x0a, 0x5f, 0x7a, 0x9c, 0x84, 0xed, 0xec, 0x9d, 0xec, 0xe2, 0x12, 0x4f, 0x07, 0x57, 0x5d, - 0xd0, 0xc5, 0xdc, 0x9c, 0x37, 0x5c, 0xab, 0xa2, 0x26, 0x99, 0xdd, 0xec, 0x31, 0x97, 0xec, 0x61, - 0xac, 0x3e, 0x93, 0x1b, 0xf7, 0xfc, 0xd0, 0xab, 0x5e, 0xd1, 0x4a, 0xc6, 0x2c, 0xa5, 0xfd, 0x31, - 0xca, 0xba, 0xaa, 0x1a, 0x98, 0xdc, 0xed, 0x9c, 0x42, 0xf3, 0x1b, 0xc2, 0xab, 0x79, 0xd7, 0xd4, - 0xaf, 0x20, 0x6f, 0xa7, 0x9c, 0xa3, 0x97, 0x73, 0x4e, 0xb1, 0xb5, 0x6f, 0xab, 0xd9, 0x67, 0xcc, - 0x22, 0x39, 0xd7, 0x76, 0xf0, 0xa2, 0x2f, 0x21, 0x88, 0xaa, 0x45, 0xfd, 0xc9, 0x1b, 0xb3, 0xaa, - 0xcf, 0x97, 0x64, 0xad, 0xa4, 0x62, 0x8b, 0x4f, 0x14, 0xcd, 0x4e, 0xd8, 0xcd, 0xc3, 0x22, 0xce, - 0x35, 0x45, 0x0e, 0x70, 0x45, 0x91, 0xf7, 0xa1, 0x07, 0x8e, 0xe4, 0x22, 0xdd, 0xa0, 0x8d, 0xf9, - 0xd6, 0xd0, 0xbd, 0x1c, 0x25, 0xd9, 0xa3, 0xb5, 0x34, 0x59, 0x25, 0xff, 0x64, 0x5f, 0xd0, 0x26, - 0xaf, 0x71, 0x59, 0xf2, 0x9e, 0x5a, 0x65, 0x9f, 0x87, 0x59, 0x1f, 0x17, 0xa6, 0xa0, 0x36, 0x49, - 0xa5, 0x7a, 0x35, 0x86, 0x59, 0x37, 0x52, 0xe1, 0xf2, 0x24, 0x16, 0xd9, 0x79, 0x9d, 0xda, 0x03, - 0x7c, 0x7d, 0xaa, 0x9e, 0x19, 0x2b, 0xb3, 0x96, 0x5f, 0x99, 0xe5, 0xdc, 0x0a, 0x58, 0xad, 0xa3, - 0x73, 0xa3, 0x70, 0x7c, 0x6e, 0x14, 0x4e, 0xce, 0x8d, 0xc2, 0xc7, 0x91, 0x81, 0x8e, 0x46, 0x06, - 0x3a, 0x1e, 0x19, 0xe8, 0x64, 0x64, 0xa0, 0x1f, 0x23, 0x03, 0x1d, 0xfe, 0x34, 0x0a, 0x6f, 0x8a, - 0x83, 0xcd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x40, 0xe0, 0x08, 0xf3, 0x05, 0x00, 0x00, +var fileDescriptor_9007436710e7565b = []byte{ + // 643 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xcd, 0xa6, 0xbf, 0xaa, 0xe9, 0x26, 0xfd, 0x51, 0x96, 0x1e, 0xa2, 0x08, 0x39, 0x51, 0x4e, + 0x05, 0xa9, 0xeb, 0xb6, 0x42, 0xa8, 0xe2, 0x82, 0x64, 0x68, 0x05, 0x12, 0x14, 0x70, 0xe1, 0x82, + 0x38, 0xb0, 0xb5, 0x17, 0x67, 0x9b, 0xd8, 0x1b, 0xd9, 0xeb, 0x88, 0xdc, 0x10, 0x17, 0x24, 0x4e, + 0xfd, 0x2e, 0x1c, 0xf8, 0x0a, 0x15, 0xa7, 0x1e, 0x7b, 0x6a, 0xa9, 0xf9, 0x16, 0x9c, 0xd0, 0xae, + 0xff, 0x64, 0x83, 0x43, 0x28, 0x37, 0xef, 0xec, 0x7b, 0x6f, 0x66, 0xde, 0xec, 0x18, 0x76, 0xfb, + 0x3b, 0x11, 0x66, 0xdc, 0x24, 0x43, 0x66, 0x06, 0xdc, 0xa5, 0xe6, 0x68, 0xcb, 0xf4, 0x68, 0x40, + 0x43, 0x22, 0xa8, 0x8b, 0x87, 0x21, 0x17, 0x1c, 0xa1, 0x14, 0x83, 0xc9, 0x90, 0x61, 0x89, 0xc1, + 0xa3, 0xad, 0xd6, 0x86, 0xc7, 0x44, 0x2f, 0x3e, 0xc4, 0x0e, 0xf7, 0x4d, 0x8f, 0x7b, 0xdc, 
0x54, + 0xd0, 0xc3, 0xf8, 0x9d, 0x3a, 0xa9, 0x83, 0xfa, 0x4a, 0x25, 0x5a, 0x7a, 0x1a, 0x87, 0x87, 0xb3, + 0xd2, 0xb4, 0xee, 0x4c, 0x30, 0x3e, 0x71, 0x7a, 0x2c, 0xa0, 0xe1, 0xd8, 0x1c, 0xf6, 0x3d, 0x45, + 0x0a, 0x69, 0xc4, 0xe3, 0xd0, 0xa1, 0xff, 0xc4, 0x8a, 0x4c, 0x9f, 0x0a, 0x32, 0x2b, 0x97, 0xf9, + 0x27, 0x56, 0x18, 0x07, 0x82, 0xf9, 0xe5, 0x34, 0x77, 0xff, 0x46, 0x88, 0x9c, 0x1e, 0xf5, 0xc9, + 0xef, 0xbc, 0xee, 0xb7, 0x2a, 0xac, 0x3d, 0x1b, 0xd1, 0xb0, 0x47, 0x89, 0x8b, 0x4e, 0x01, 0xac, + 0x0d, 0xb9, 0xbb, 0xc7, 0xde, 0x53, 0xb7, 0x09, 0x3a, 0x0b, 0xeb, 0xf5, 0xed, 0xdb, 0xb8, 0x6c, + 0x2e, 0xce, 0x09, 0xf8, 0x79, 0x06, 0xde, 0x0d, 0x44, 0x38, 0xb6, 0x3e, 0x81, 0x93, 0xf3, 0x76, + 0x25, 0x39, 0x6f, 0xd7, 0xf2, 0xf8, 0xcf, 0xf3, 0x76, 0xbb, 0xec, 0x2c, 0xb6, 0x33, 0xb3, 0x9e, + 0xb0, 0x48, 0x7c, 0xbc, 0x98, 0x0b, 0xd9, 0x27, 0x3e, 0xfd, 0x7c, 0xd1, 0xde, 0xb8, 0x8a, 0xf7, + 0xf8, 0x45, 0x4c, 0x02, 0xc1, 0xc4, 0xd8, 0x2e, 0xba, 0x68, 0xf5, 0xe1, 0xca, 0x54, 0x91, 0x68, + 0x15, 0x2e, 0xf4, 0xe9, 0xb8, 0x09, 0x3a, 0x60, 0x7d, 0xd9, 0x96, 0x9f, 0xe8, 0x21, 0x5c, 0x1c, + 0x91, 0x41, 0x4c, 0x9b, 0xd5, 0x0e, 0x58, 0xaf, 0x6f, 0x63, 0xad, 0xe3, 0x22, 0x17, 0x1e, 0xf6, + 0x3d, 0x65, 0x41, 0x39, 0x57, 0x4a, 0xbe, 0x57, 0xdd, 0x01, 0xdd, 0x2f, 0x55, 0xd8, 0xb0, 0x53, + 0xbf, 0x1f, 0x0c, 0x48, 0x14, 0xa1, 0xb7, 0xb0, 0x26, 0x27, 0xec, 0x12, 0x41, 0x54, 0xc6, 0xfa, + 0xf6, 0xe6, 0x3c, 0xf5, 0x08, 0x4b, 0xb4, 0x72, 0xf8, 0xf0, 0x88, 0x3a, 0xe2, 0x29, 0x15, 0xc4, + 0x42, 0x99, 0xa9, 0x70, 0x12, 0xb3, 0x0b, 0x55, 0x74, 0x0b, 0x2e, 0xf5, 0x48, 0xe0, 0x0e, 0x68, + 0xa8, 0xca, 0x5f, 0xb6, 0xae, 0x65, 0xf0, 0xa5, 0x47, 0x69, 0xd8, 0xce, 0xef, 0xd1, 0x1e, 0xac, + 0xf1, 0x6c, 0x70, 0xcd, 0x05, 0x55, 0xcc, 0xcd, 0x79, 0xc3, 0xb5, 0x1a, 0x72, 0x92, 0xf9, 0xc9, + 0x2e, 0xb8, 0x68, 0x1f, 0x42, 0xf9, 0x98, 0xdc, 0x78, 0xc0, 0x02, 0xaf, 0xf9, 0x9f, 0x52, 0x32, + 0x66, 0x29, 0x1d, 0x14, 0x28, 0xeb, 0x7f, 0xd9, 0xc0, 0xe4, 0x6c, 0x6b, 0x0a, 0xdd, 0xaf, 0x00, + 0xae, 0xea, 0xae, 0xc9, 0x57, 0x81, 0xde, 0x94, 0x9c, 0xc3, 0x57, 0x73, 0x4e, 0xb2, 0x95, 0x6f, + 0xab, 0xf9, 0x63, 0xcc, 0x23, 0x9a, 0x6b, 0xbb, 0x70, 0x91, 0x09, 0xea, 0x47, 0xcd, 0xaa, 0x7a, + 0xe4, 0x9d, 0x59, 0xd5, 0xeb, 0x25, 0x59, 0x2b, 0x99, 0xd8, 0xe2, 0x63, 0x49, 0xb3, 0x53, 0x76, + 0xf7, 0xb8, 0x0a, 0xb5, 0xa6, 0xd0, 0x11, 0x6c, 0x48, 0xf2, 0x01, 0x1d, 0x50, 0x47, 0xf0, 0x30, + 0xdb, 0xa0, 0xcd, 0xf9, 0xd6, 0xe0, 0x7d, 0x8d, 0x92, 0xee, 0xd1, 0x5a, 0x96, 0xac, 0xa1, 0x5f, + 0xd9, 0x53, 0xda, 0xe8, 0x15, 0xac, 0x0b, 0x3e, 0x90, 0xab, 0xcc, 0x78, 0x90, 0xf7, 0x31, 0x35, + 0x05, 0xb9, 0x49, 0x32, 0xd5, 0xcb, 0x02, 0x66, 0xdd, 0xc8, 0x84, 0xeb, 0x93, 0x58, 0x64, 0xeb, + 0x3a, 0xad, 0xfb, 0xf0, 0x7a, 0xa9, 0x9e, 0x19, 0x2b, 0xb3, 0xa6, 0xaf, 0xcc, 0xb2, 0xb6, 0x02, + 0xd6, 0xce, 0xc9, 0xa5, 0x51, 0x39, 0xbd, 0x34, 0x2a, 0x67, 0x97, 0x46, 0xe5, 0x43, 0x62, 0x80, + 0x93, 0xc4, 0x00, 0xa7, 0x89, 0x01, 0xce, 0x12, 0x03, 0x7c, 0x4f, 0x0c, 0x70, 0xfc, 0xc3, 0xa8, + 0xbc, 0x46, 0xe5, 0xbf, 0xfa, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x3f, 0x9c, 0xd0, 0xea, + 0x05, 0x00, 0x00, } func (m *Overhead) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto index 0b98cabb2..e6b8852ec 100644 --- a/vendor/k8s.io/api/node/v1/generated.proto +++ b/vendor/k8s.io/api/node/v1/generated.proto @@ -28,13 +28,13 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". 
-option go_package = "v1"; +option go_package = "k8s.io/api/node/v1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,9 +47,9 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -61,15 +61,13 @@ message RuntimeClass { // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ - // This field is in beta starting v1.18 - // and is only honored by servers that enable the PodOverhead feature. // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -82,9 +80,9 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -105,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go index bfe947e1f..169862ea9 100644 --- a/vendor/k8s.io/api/node/v1/types.go +++ b/vendor/k8s.io/api/node/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClass defines a class of container runtime supported in the cluster. 
// The RuntimeClass is used to determine which container runtime is used to run @@ -34,11 +35,12 @@ import ( // https://kubernetes.io/docs/concepts/containers/runtime-class/ type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -50,15 +52,13 @@ type RuntimeClass struct { // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/ - // This field is in beta starting v1.18 - // and is only honored by servers that enable the PodOverhead feature. // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -68,7 +68,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -94,15 +94,17 @@ type Scheduling struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go index c68c40e90..f5e6b3277 100644 --- a/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
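A minimal sketch, assuming the node/v1 types above, of a RuntimeClass that selects the "runc" handler and accounts a fixed per-pod overhead via podFixed. The handler name follows the example given in the field documentation; the resource quantities are illustrative values, not defaults from this package.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "runc"},
		Handler:    "runc", // must be lowercase and a valid DNS label (RFC 1123)
		Overhead: &nodev1.Overhead{
			// Fixed overhead added to the pod's resource accounting.
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("100m"),
				corev1.ResourceMemory: resource.MustParse("32Mi"),
			},
		},
	}
	fmt.Println(rc.Name, rc.Handler)
}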
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/\nThis field is in beta starting v1.18 and is only honored by servers that enable the PodOverhead feature.", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..749795568 --- /dev/null +++ b/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClass) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go index abd2c09b6..16ac69643 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto +// source: k8s.io/api/node/v1alpha1/generated.proto package v1alpha1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{0} + return fileDescriptor_a8fee97bf5273e47, []int{0} } func (m *Overhead) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_Overhead proto.InternalMessageInfo func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } func (*RuntimeClass) ProtoMessage() {} func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{1} + return fileDescriptor_a8fee97bf5273e47, []int{1} } func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } func (*RuntimeClassList) ProtoMessage() {} func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{2} + return fileDescriptor_a8fee97bf5273e47, []int{2} } func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } func (*RuntimeClassSpec) ProtoMessage() {} func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{3} + return fileDescriptor_a8fee97bf5273e47, []int{3} } func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo func (m *Scheduling) Reset() { *m = Scheduling{} } func (*Scheduling) ProtoMessage() {} func (*Scheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{4} + return fileDescriptor_a8fee97bf5273e47, []int{4} } func (m *Scheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -199,55 +199,54 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) + proto.RegisterFile("k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_a8fee97bf5273e47) } -var fileDescriptor_82a78945ab308218 = []byte{ - // 698 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbf, 0x6f, 0xd3, 0x4e, - 0x14, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x5f, 0x7f, 0x2b, 0x14, 0x65, 0x70, 0x2a, 0x0b, - 0xa1, 0x0a, 0xa9, 0x67, 0x5a, 0xa1, 0xaa, 0x62, 0x00, 0x61, 0x7e, 0x08, 0x44, 0x69, 0xc1, 0x2d, - 0x0b, 0x62, 0xe0, 0x62, 0x3f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, 0x89, - 0x89, 0x89, 0xff, 0x06, 0xe6, 0x8e, 0x9d, 0x50, 0xa7, 0x96, 0x86, 0xff, 0x81, 0x81, 0x09, 0x9d, - 0x7d, 0x4e, 0x9c, 0xa4, 0xa1, 0x61, 0xf3, 0xdd, 0x7d, 0x7e, 0xdc, 0xfb, 0xbc, 0x7b, 0xc6, 0x77, - 0x3b, 0x3b, 0x31, 0x71, 0x99, 0xde, 0x49, 0x5a, 0x10, 0x05, 0xc0, 0x21, 0xd6, 0xbb, 0x10, 0xd8, - 0x2c, 0xd2, 0xe5, 0x01, 0x0d, 0x5d, 0x3d, 0x60, 0x36, 0xe8, 0xdd, 0x4d, 0xea, 0x85, 0x6d, 0xba, - 0xa9, 0x3b, 0x10, 0x40, 0x44, 0x39, 0xd8, 0x24, 0x8c, 0x18, 0x67, 0x4a, 0x3d, 0x43, 0x12, 0x1a, - 0xba, 0x44, 0x20, 0x49, 0x8e, 0x6c, 0x6c, 0x38, 0x2e, 0x6f, 0x27, 0x2d, 0x62, 0x31, 0x5f, 
0x77, - 0x98, 0xc3, 0xf4, 0x94, 0xd0, 0x4a, 0xde, 0xa4, 0xab, 0x74, 0x91, 0x7e, 0x65, 0x42, 0x0d, 0xad, - 0x60, 0x69, 0xb1, 0x48, 0x58, 0x8e, 0x9b, 0x35, 0x6e, 0x0e, 0x31, 0x3e, 0xb5, 0xda, 0x6e, 0x00, - 0x51, 0x4f, 0x0f, 0x3b, 0x4e, 0x4a, 0x8a, 0x20, 0x66, 0x49, 0x64, 0xc1, 0x3f, 0xb1, 0x62, 0xdd, - 0x07, 0x4e, 0x2f, 0xf2, 0xd2, 0xa7, 0xb1, 0xa2, 0x24, 0xe0, 0xae, 0x3f, 0x69, 0xb3, 0x7d, 0x19, - 0x21, 0xb6, 0xda, 0xe0, 0xd3, 0x71, 0x9e, 0x76, 0x5c, 0xc6, 0x95, 0xfd, 0x2e, 0x44, 0x6d, 0xa0, - 0xb6, 0xf2, 0x1d, 0xe1, 0x4a, 0xc8, 0xec, 0x87, 0xee, 0x3b, 0xb0, 0xeb, 0x68, 0x6d, 0x6e, 0xbd, - 0xba, 0x75, 0x83, 0x4c, 0x8b, 0x98, 0xe4, 0x34, 0xf2, 0x4c, 0x52, 0x1e, 0x04, 0x3c, 0xea, 0x19, - 0x1f, 0xd1, 0xd1, 0x69, 0xb3, 0xd4, 0x3f, 0x6d, 0x56, 0xf2, 0xfd, 0xdf, 0xa7, 0xcd, 0xe6, 0x64, - 0xbe, 0xc4, 0x94, 0x91, 0xed, 0xba, 0x31, 0xff, 0x70, 0xf6, 0x57, 0xc8, 0x1e, 0xf5, 0xe1, 0xd3, - 0x59, 0x73, 0x63, 0x96, 0x0e, 0x90, 0xe7, 0x09, 0x0d, 0xb8, 0xcb, 0x7b, 0xe6, 0xa0, 0x96, 0x46, - 0x07, 0x2f, 0x8d, 0x5c, 0x52, 0x59, 0xc1, 0x73, 0x1d, 0xe8, 0xd5, 0xd1, 0x1a, 0x5a, 0x5f, 0x34, - 0xc5, 0xa7, 0x72, 0x1f, 0x2f, 0x74, 0xa9, 0x97, 0x40, 0xbd, 0xbc, 0x86, 0xd6, 0xab, 0x5b, 0xa4, - 0x50, 0xf7, 0xc0, 0x8b, 0x84, 0x1d, 0x27, 0x0d, 0x62, 0xd2, 0x2b, 0x23, 0xdf, 0x2a, 0xef, 0x20, - 0xed, 0x1b, 0xc2, 0x35, 0x33, 0x4b, 0xfd, 0x9e, 0x47, 0xe3, 0x58, 0x79, 0x8d, 0x2b, 0xa2, 0xcf, - 0x36, 0xe5, 0x34, 0x75, 0x1c, 0x4d, 0x75, 0x42, 0x3d, 0x26, 0x02, 0x4d, 0xba, 0x9b, 0x64, 0xbf, - 0xf5, 0x16, 0x2c, 0xfe, 0x14, 0x38, 0x35, 0x14, 0x19, 0x2a, 0x1e, 0xee, 0x99, 0x03, 0x55, 0x65, - 0x17, 0xcf, 0xc7, 0x21, 0x58, 0xf2, 0xee, 0xd7, 0xa7, 0xf7, 0xac, 0x78, 0xaf, 0x83, 0x10, 0x2c, - 0xa3, 0x26, 0x75, 0xe7, 0xc5, 0xca, 0x4c, 0x55, 0xb4, 0xaf, 0x08, 0xaf, 0x14, 0x81, 0xa2, 0x41, - 0xca, 0xab, 0x89, 0x22, 0xc8, 0x6c, 0x45, 0x08, 0x76, 0x5a, 0xc2, 0x4a, 0xfe, 0x2e, 0xf2, 0x9d, - 0x42, 0x01, 0x4f, 0xf0, 0x82, 0xcb, 0xc1, 0x8f, 0xeb, 0xe5, 0xf4, 0xd5, 0x5d, 0x9b, 0xad, 0x02, - 0x63, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x33, 0xd3, 0xd0, 0x7e, 0x8d, 0xdd, 0x5f, 0x94, 0xa6, - 0xdc, 0xc6, 0xcb, 0x72, 0x14, 0x1e, 0xd1, 0xc0, 0xf6, 0x20, 0xca, 0x9a, 0x6f, 0x5c, 0x91, 0x12, - 0xcb, 0xe6, 0xc8, 0xa9, 0x39, 0x86, 0x56, 0x76, 0x71, 0x85, 0xc9, 0x07, 0x2f, 0x63, 0xd6, 0x2e, - 0x1f, 0x0d, 0xa3, 0x26, 0xea, 0xcd, 0x57, 0xe6, 0x40, 0x41, 0x39, 0xc4, 0x58, 0x0c, 0xa4, 0x9d, - 0x78, 0x6e, 0xe0, 0xd4, 0xe7, 0x52, 0xbd, 0xab, 0xd3, 0xf5, 0x0e, 0x06, 0x58, 0x63, 0x59, 0x3c, - 0x82, 0xe1, 0xda, 0x2c, 0xe8, 0x68, 0x5f, 0xca, 0xb8, 0x70, 0xa4, 0x84, 0xb8, 0x26, 0x64, 0x0e, - 0xc0, 0x03, 0x8b, 0xb3, 0x48, 0x4e, 0xf4, 0xf6, 0x2c, 0x36, 0x64, 0xaf, 0x40, 0xcc, 0xe6, 0x7a, - 0x55, 0x06, 0x55, 0x2b, 0x1e, 0x99, 0x23, 0x0e, 0xca, 0x0b, 0x5c, 0xe5, 0xcc, 0x13, 0x3f, 0x18, - 0x97, 0x05, 0x79, 0x33, 0xd5, 0xa2, 0xa1, 0x98, 0x6c, 0xf1, 0x2a, 0x0e, 0x07, 0x30, 0xe3, 0x7f, - 0x29, 0x5c, 0x1d, 0xee, 0xc5, 0x66, 0x51, 0xa7, 0x71, 0x07, 0xff, 0x37, 0x71, 0x9f, 0x0b, 0x46, - 0x78, 0xb5, 0x38, 0xc2, 0x8b, 0x85, 0x91, 0x34, 0xc8, 0xd1, 0xb9, 0x5a, 0x3a, 0x3e, 0x57, 0x4b, - 0x27, 0xe7, 0x6a, 0xe9, 0x7d, 0x5f, 0x45, 0x47, 0x7d, 0x15, 0x1d, 0xf7, 0x55, 0x74, 0xd2, 0x57, - 0xd1, 0x8f, 0xbe, 0x8a, 0x3e, 0xff, 0x54, 0x4b, 0x2f, 0x2b, 0x79, 0x10, 0x7f, 0x02, 0x00, 0x00, - 0xff, 0xff, 0xa8, 0x77, 0xef, 0x80, 0x9b, 0x06, 0x00, 0x00, +var fileDescriptor_a8fee97bf5273e47 = []byte{ + // 683 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x3d, 0x6f, 0xd3, 0x4c, + 0x1c, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x8f, 
0x9f, 0x0a, 0x45, 0x19, 0x9c, 0xca, 0x42, + 0x28, 0x42, 0xea, 0x99, 0x56, 0xa8, 0xaa, 0x18, 0x8a, 0x64, 0x5e, 0x04, 0xa2, 0xb4, 0x70, 0x2d, + 0x0b, 0x62, 0xe0, 0x6a, 0x1f, 0x8e, 0x89, 0xed, 0xb3, 0xec, 0x73, 0x44, 0x36, 0xc4, 0x82, 0xc4, + 0xc4, 0xc4, 0xb7, 0x81, 0xb9, 0x63, 0x27, 0xd4, 0xa9, 0xa5, 0xe1, 0x3b, 0x30, 0x30, 0xa1, 0xb3, + 0xcf, 0xc9, 0x25, 0x69, 0x68, 0xd8, 0x7c, 0x77, 0xbf, 0x97, 0xff, 0x6b, 0x02, 0x5b, 0x9d, 0xed, + 0x04, 0x79, 0xcc, 0x24, 0x91, 0x67, 0x86, 0xcc, 0xa1, 0x66, 0x77, 0x83, 0xf8, 0x51, 0x9b, 0x6c, + 0x98, 0x2e, 0x0d, 0x69, 0x4c, 0x38, 0x75, 0x50, 0x14, 0x33, 0xce, 0xb4, 0x7a, 0x8e, 0x44, 0x24, + 0xf2, 0x90, 0x40, 0xa2, 0x02, 0xd9, 0x58, 0x77, 0x3d, 0xde, 0x4e, 0x8f, 0x90, 0xcd, 0x02, 0xd3, + 0x65, 0x2e, 0x33, 0x33, 0xc2, 0x51, 0xfa, 0x26, 0x3b, 0x65, 0x87, 0xec, 0x2b, 0x17, 0x6a, 0x18, + 0x8a, 0xa5, 0xcd, 0x62, 0x61, 0x39, 0x6e, 0xd6, 0xb8, 0x3d, 0xc4, 0x04, 0xc4, 0x6e, 0x7b, 0x21, + 0x8d, 0x7b, 0x66, 0xd4, 0x71, 0x33, 0x52, 0x4c, 0x13, 0x96, 0xc6, 0x36, 0xfd, 0x27, 0x56, 0x62, + 0x06, 0x94, 0x93, 0xcb, 0xbc, 0xcc, 0x69, 0xac, 0x38, 0x0d, 0xb9, 0x17, 0x4c, 0xda, 0x6c, 0x5d, + 0x45, 0x48, 0xec, 0x36, 0x0d, 0xc8, 0x38, 0xcf, 0x38, 0x29, 0xc3, 0xca, 0x7e, 0x97, 0xc6, 0x6d, + 0x4a, 0x1c, 0xed, 0x3b, 0x80, 0x95, 0x88, 0x39, 0x0f, 0xbd, 0x77, 0xd4, 0xa9, 0x83, 0xb5, 0xb9, + 0x56, 0x75, 0xf3, 0x16, 0x9a, 0x56, 0x62, 0x54, 0xd0, 0xd0, 0x33, 0x49, 0x79, 0x10, 0xf2, 0xb8, + 0x67, 0x7d, 0x04, 0xc7, 0x67, 0xcd, 0x52, 0xff, 0xac, 0x59, 0x29, 0xee, 0x7f, 0x9f, 0x35, 0x9b, + 0x93, 0xf5, 0x45, 0x58, 0x96, 0x6c, 0xd7, 0x4b, 0xf8, 0x87, 0xf3, 0xbf, 0x42, 0xf6, 0x48, 0x40, + 0x3f, 0x9d, 0x37, 0xd7, 0x67, 0xe9, 0x00, 0x7a, 0x9e, 0x92, 0x90, 0x7b, 0xbc, 0x87, 0x07, 0xb9, + 0x34, 0x3a, 0x70, 0x69, 0x24, 0x48, 0x6d, 0x05, 0xce, 0x75, 0x68, 0xaf, 0x0e, 0xd6, 0x40, 0x6b, + 0x11, 0x8b, 0x4f, 0xed, 0x3e, 0x5c, 0xe8, 0x12, 0x3f, 0xa5, 0xf5, 0xf2, 0x1a, 0x68, 0x55, 0x37, + 0x91, 0x92, 0xf7, 0xc0, 0x0b, 0x45, 0x1d, 0x37, 0x2b, 0xc4, 0xa4, 0x57, 0x4e, 0xbe, 0x53, 0xde, + 0x06, 0xc6, 0x37, 0x00, 0x6b, 0x38, 0xaf, 0xfa, 0x3d, 0x9f, 0x24, 0x89, 0xf6, 0x1a, 0x56, 0x44, + 0x9f, 0x1d, 0xc2, 0x49, 0xe6, 0x38, 0x5a, 0xd5, 0x09, 0xf5, 0x04, 0x09, 0x34, 0xea, 0x6e, 0xa0, + 0xfd, 0xa3, 0xb7, 0xd4, 0xe6, 0x4f, 0x29, 0x27, 0x96, 0x26, 0x8b, 0x0a, 0x87, 0x77, 0x78, 0xa0, + 0xaa, 0xed, 0xc2, 0xf9, 0x24, 0xa2, 0xb6, 0x8c, 0xfd, 0xe6, 0xf4, 0x9e, 0xa9, 0x71, 0x1d, 0x44, + 0xd4, 0xb6, 0x6a, 0x52, 0x77, 0x5e, 0x9c, 0x70, 0xa6, 0x62, 0x7c, 0x05, 0x70, 0x45, 0x05, 0x8a, + 0x06, 0x69, 0xaf, 0x26, 0x92, 0x40, 0xb3, 0x25, 0x21, 0xd8, 0x59, 0x0a, 0x2b, 0xc5, 0x5c, 0x14, + 0x37, 0x4a, 0x02, 0x4f, 0xe0, 0x82, 0xc7, 0x69, 0x90, 0xd4, 0xcb, 0xd9, 0xd4, 0xdd, 0x98, 0x2d, + 0x03, 0x6b, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x9c, 0x6b, 0x18, 0xbf, 0xc6, 0xe2, 0x17, 0xa9, + 0x69, 0x3b, 0x70, 0x59, 0xae, 0xc2, 0x23, 0x12, 0x3a, 0x3e, 0x8d, 0xf3, 0xe6, 0x5b, 0xd7, 0xa4, + 0xc4, 0x32, 0x1e, 0x79, 0xc5, 0x63, 0x68, 0x6d, 0x17, 0x56, 0x98, 0x1c, 0x78, 0x59, 0x66, 0xe3, + 0xea, 0xd5, 0xb0, 0x6a, 0x22, 0xdf, 0xe2, 0x84, 0x07, 0x0a, 0xda, 0x21, 0x84, 0x62, 0x21, 0x9d, + 0xd4, 0xf7, 0x42, 0xb7, 0x3e, 0x97, 0xe9, 0x5d, 0x9f, 0xae, 0x77, 0x30, 0xc0, 0x5a, 0xcb, 0x62, + 0x08, 0x86, 0x67, 0xac, 0xe8, 0x18, 0x5f, 0xca, 0x50, 0x79, 0xd2, 0x22, 0x58, 0x13, 0x32, 0x07, + 0xd4, 0xa7, 0x36, 0x67, 0xb1, 0xdc, 0xe8, 0xad, 0x59, 0x6c, 0xd0, 0x9e, 0x42, 0xcc, 0xf7, 0x7a, + 0x55, 0x16, 0xaa, 0xa6, 0x3e, 0xe1, 0x11, 0x07, 0xed, 0x05, 0xac, 0x72, 0xe6, 0x8b, 0x1f, 0x18, + 0x8f, 0x85, 0x45, 0x33, 0x75, 0xd5, 0x50, 0x6c, 0xb6, 0x98, 0x8a, 0xc3, 0x01, 
0xcc, 0xfa, 0x5f, + 0x0a, 0x57, 0x87, 0x77, 0x09, 0x56, 0x75, 0x1a, 0x77, 0xe1, 0x7f, 0x13, 0xf1, 0x5c, 0xb2, 0xc2, + 0xab, 0xea, 0x0a, 0x2f, 0x2a, 0x2b, 0x69, 0xed, 0x1c, 0x5f, 0xe8, 0xa5, 0x93, 0x0b, 0xbd, 0x74, + 0x7a, 0xa1, 0x97, 0xde, 0xf7, 0x75, 0x70, 0xdc, 0xd7, 0xc1, 0x49, 0x5f, 0x07, 0xa7, 0x7d, 0x1d, + 0xfc, 0xe8, 0xeb, 0xe0, 0xf3, 0x4f, 0xbd, 0xf4, 0xb2, 0x3e, 0xed, 0x7f, 0xe7, 0x4f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xa7, 0x9b, 0x7f, 0x45, 0x92, 0x06, 0x00, 0x00, } func (m *Overhead) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto index d92e18ff3..bc68718d9 100644 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -28,13 +28,13 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "k8s.io/api/node/v1alpha1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,9 +47,9 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status optional RuntimeClassSpec spec = 2; } @@ -59,9 +59,9 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -70,7 +70,7 @@ message RuntimeClassList { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. message RuntimeClassSpec { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -78,18 +78,17 @@ message RuntimeClassSpec { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. 
optional string runtimeHandler = 1; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional optional Overhead overhead = 2; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -114,6 +113,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go index f11bcbb10..bf9e284bf 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -34,11 +34,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the RuntimeClass + // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` } @@ -48,7 +49,7 @@ type RuntimeClass struct { // Interface (CRI) implementation, as well as any other components that need to // understand how the pod will be run. The RuntimeClassSpec is immutable. type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the + // runtimeHandler specifies the underlying runtime and configuration that the // CRI implementation will use to handle pods of this class. The possible // values are specific to the node & CRI configuration. It is assumed that // all handlers are available on every node, and handlers of the same name are @@ -56,18 +57,17 @@ type RuntimeClassSpec struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) + // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) // requirements, and is immutable. RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. 
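
The node/v1alpha1 hunks above are regenerated descriptors plus doc-comment churn (field-name casing and dropping the old PodOverhead feature-gate sentence); the Go struct shape itself does not change in this bump. As orientation, here is a minimal sketch of a RuntimeClass built against these vendored v1alpha1 types; the object name, handler string, and quantities are illustrative placeholders, not values taken from this repository.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// In v1alpha1 the handler lives under spec.runtimeHandler, and the optional
	// overhead.podFixed map charges a fixed per-pod resource cost to the node.
	rc := nodev1alpha1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gvisor"}, // illustrative name
		Spec: nodev1alpha1.RuntimeClassSpec{
			RuntimeHandler: "runsc", // must be a lowercase DNS label
			Overhead: &nodev1alpha1.Overhead{
				PodFixed: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("250m"),
					corev1.ResourceMemory: resource.MustParse("120Mi"),
				},
			},
		},
	}
	fmt.Println(rc.Spec.RuntimeHandler)
}
```
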
// +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -77,7 +77,7 @@ type RuntimeClassSpec struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -107,11 +107,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go index 5a259573c..ccc1b7085 100644 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "spec": "spec represents specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -49,7 +49,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { @@ -58,9 +58,9 @@ func (RuntimeClassList) SwaggerDoc() map[string]string { var map_RuntimeClassSpec = map[string]string{ "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "runtimeHandler": "runtimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClassSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go index 4bfdd5df3..537961c25 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto +// source: k8s.io/api/node/v1beta1/generated.proto package v1beta1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Overhead) Reset() { *m = Overhead{} } func (*Overhead) ProtoMessage() {} func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{0} + return fileDescriptor_73bb62abe8438af4, []int{0} } func (m *Overhead) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_Overhead proto.InternalMessageInfo func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } func (*RuntimeClass) ProtoMessage() {} func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{1} + return fileDescriptor_73bb62abe8438af4, []int{1} } func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } func (*RuntimeClassList) ProtoMessage() {} func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{2} + return fileDescriptor_73bb62abe8438af4, []int{2} } func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo func (m *Scheduling) Reset() { *m = Scheduling{} } func (*Scheduling) ProtoMessage() {} func (*Scheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{3} + return fileDescriptor_73bb62abe8438af4, []int{3} } func (m *Scheduling) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -170,53 +170,52 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) + proto.RegisterFile("k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_73bb62abe8438af4) } -var fileDescriptor_f977b0dddc93b4ec = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbd, 0x6f, 0xd3, 0x40, - 0x14, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0x89, 0x28, 0x83, 0x53, 0x82, 0x90, 0xca, - 0xd0, 0x33, 0xad, 0x00, 0x55, 0x2c, 0x20, 0xf3, 0x21, 0x3e, 0x5b, 0x70, 0x61, 0x41, 0x0c, 0x5c, - 0xec, 0x87, 0x63, 0x12, 0xfb, 0xa2, 0xf3, 0x39, 0x22, 0x1b, 0x62, 0x41, 0x62, 0x62, 0xe1, 0xbf, - 0x81, 0xbd, 0x1b, 0x5d, 0x90, 0x3a, 0xb5, 0x34, 0xfc, 0x17, 0x4c, 0xe8, 0xec, 0x73, 0x72, 0x6d, - 0x9a, 0xb6, 0x6c, 0xbe, 0xf3, 0xef, 0xe3, 0xbd, 0xdf, 0xbb, 0x87, 0xef, 0xb4, 0xd7, 0x62, 0x12, - 0x30, 0xab, 0x9d, 0x34, 0x81, 0x47, 0x20, 0x20, 0xb6, 0x7a, 0x10, 0x79, 0x8c, 
0x5b, 0xea, 0x07, - 0xed, 0x06, 0x56, 0xc4, 0x3c, 0xb0, 0x7a, 0x2b, 0x4d, 0x10, 0x74, 0xc5, 0xf2, 0x21, 0x02, 0x4e, - 0x05, 0x78, 0xa4, 0xcb, 0x99, 0x60, 0xc6, 0xc5, 0x0c, 0x48, 0x68, 0x37, 0x20, 0x12, 0x48, 0x14, - 0xb0, 0xb6, 0xec, 0x07, 0xa2, 0x95, 0x34, 0x89, 0xcb, 0x42, 0xcb, 0x67, 0x3e, 0xb3, 0x52, 0x7c, - 0x33, 0x79, 0x97, 0x9e, 0xd2, 0x43, 0xfa, 0x95, 0xe9, 0xd4, 0x1a, 0x9a, 0xa1, 0xcb, 0xb8, 0x34, - 0x3c, 0xec, 0x55, 0xbb, 0x3e, 0xc2, 0x84, 0xd4, 0x6d, 0x05, 0x11, 0xf0, 0xbe, 0xd5, 0x6d, 0xfb, - 0x29, 0x89, 0x43, 0xcc, 0x12, 0xee, 0xc2, 0x7f, 0xb1, 0x62, 0x2b, 0x04, 0x41, 0x8f, 0xf2, 0xb2, - 0x26, 0xb1, 0x78, 0x12, 0x89, 0x20, 0x1c, 0xb7, 0xb9, 0x79, 0x12, 0x21, 0x76, 0x5b, 0x10, 0xd2, - 0xc3, 0xbc, 0xc6, 0xcf, 0x22, 0x2e, 0x6d, 0xf4, 0x80, 0xb7, 0x80, 0x7a, 0xc6, 0x2f, 0x84, 0x4b, - 0x5d, 0xe6, 0x3d, 0x08, 0x3e, 0x80, 0x57, 0x45, 0x8b, 0x53, 0x4b, 0xe5, 0x55, 0x8b, 0x4c, 0x48, - 0x98, 0xe4, 0x2c, 0xf2, 0x5c, 0x31, 0xee, 0x47, 0x82, 0xf7, 0xed, 0xcf, 0x68, 0x6b, 0xb7, 0x5e, - 0x18, 0xec, 0xd6, 0x4b, 0xf9, 0xfd, 0xdf, 0xdd, 0x7a, 0x7d, 0x3c, 0x5e, 0xe2, 0xa8, 0xc4, 0x9e, - 0x06, 0xb1, 0xf8, 0xb4, 0x77, 0x2c, 0x64, 0x9d, 0x86, 0xf0, 0x65, 0xaf, 0xbe, 0x7c, 0x9a, 0x01, - 0x90, 0x17, 0x09, 0x8d, 0x44, 0x20, 0xfa, 0xce, 0xb0, 0x95, 0x5a, 0x1b, 0xcf, 0x1d, 0x28, 0xd2, - 0x98, 0xc7, 0x53, 0x6d, 0xe8, 0x57, 0xd1, 0x22, 0x5a, 0x9a, 0x75, 0xe4, 0xa7, 0x71, 0x0f, 0x4f, - 0xf7, 0x68, 0x27, 0x81, 0x6a, 0x71, 0x11, 0x2d, 0x95, 0x57, 0x89, 0xd6, 0xf6, 0xd0, 0x8b, 0x74, - 0xdb, 0x7e, 0x9a, 0xc3, 0xb8, 0x57, 0x46, 0xbe, 0x55, 0x5c, 0x43, 0x8d, 0x1f, 0x45, 0x5c, 0x71, - 0xb2, 0xd0, 0xef, 0x76, 0x68, 0x1c, 0x1b, 0x6f, 0x71, 0x49, 0x8e, 0xd9, 0xa3, 0x82, 0xa6, 0x8e, - 0xe5, 0xd5, 0x6b, 0xc7, 0xa9, 0xc7, 0x44, 0xa2, 0x49, 0x6f, 0x85, 0x6c, 0x34, 0xdf, 0x83, 0x2b, - 0x9e, 0x81, 0xa0, 0xb6, 0xa1, 0x42, 0xc5, 0xa3, 0x3b, 0x67, 0xa8, 0x6a, 0x5c, 0xc5, 0x33, 0x2d, - 0x1a, 0x79, 0x1d, 0xe0, 0x69, 0xf9, 0xb3, 0xf6, 0x39, 0x05, 0x9f, 0x79, 0x98, 0x5d, 0x3b, 0xf9, - 0x7f, 0xe3, 0x09, 0x2e, 0x31, 0x35, 0xb8, 0xea, 0x54, 0x5a, 0xcc, 0xa5, 0x13, 0x27, 0x6c, 0x57, - 0xe4, 0x38, 0xf3, 0x93, 0x33, 0x14, 0x30, 0x36, 0x31, 0x96, 0xcf, 0xca, 0x4b, 0x3a, 0x41, 0xe4, - 0x57, 0xcf, 0xa4, 0x72, 0x97, 0x27, 0xca, 0x6d, 0x0e, 0xa1, 0xf6, 0x59, 0xd9, 0xca, 0xe8, 0xec, - 0x68, 0x32, 0x8d, 0xef, 0x08, 0xcf, 0xeb, 0xf9, 0xc9, 0xf7, 0x61, 0xbc, 0x19, 0xcb, 0x90, 0x9c, - 0x2e, 0x43, 0xc9, 0x4e, 0x13, 0x9c, 0xcf, 0x9f, 0x65, 0x7e, 0xa3, 0xe5, 0xf7, 0x18, 0x4f, 0x07, - 0x02, 0xc2, 0xb8, 0x5a, 0x4c, 0xdf, 0xfc, 0x95, 0x89, 0x2d, 0xe8, 0x75, 0xd9, 0x73, 0x4a, 0x71, - 0xfa, 0x91, 0xe4, 0x3a, 0x99, 0x44, 0xe3, 0x5b, 0x11, 0x6b, 0x9d, 0x19, 0x0c, 0x57, 0xa4, 0xc2, - 0x26, 0x74, 0xc0, 0x15, 0x8c, 0xab, 0xad, 0xba, 0x71, 0x8a, 0x90, 0xc8, 0xba, 0xc6, 0xcb, 0x76, - 0x6b, 0x41, 0x39, 0x56, 0xf4, 0x5f, 0xce, 0x01, 0x03, 0xe3, 0x15, 0x2e, 0x0b, 0xd6, 0x91, 0x3b, - 0x1e, 0xb0, 0x28, 0xef, 0xc8, 0xd4, 0xfd, 0xe4, 0x76, 0xc9, 0x68, 0x5e, 0x0e, 0x61, 0xf6, 0x05, - 0x25, 0x5c, 0x1e, 0xdd, 0xc5, 0x8e, 0xae, 0x53, 0xbb, 0x8d, 0xcf, 0x8f, 0xd5, 0x73, 0xc4, 0x1a, - 0x2d, 0xe8, 0x6b, 0x34, 0xab, 0xad, 0x85, 0xbd, 0xbc, 0xb5, 0x6f, 0x16, 0xb6, 0xf7, 0xcd, 0xc2, - 0xce, 0xbe, 0x59, 0xf8, 0x38, 0x30, 0xd1, 0xd6, 0xc0, 0x44, 0xdb, 0x03, 0x13, 0xed, 0x0c, 0x4c, - 0xf4, 0x7b, 0x60, 0xa2, 0xaf, 0x7f, 0xcc, 0xc2, 0xeb, 0x19, 0x95, 0xc3, 0xbf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x5b, 0xcf, 0x13, 0x0c, 0x1b, 0x06, 0x00, 0x00, +var fileDescriptor_73bb62abe8438af4 = []byte{ + // 654 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0x94, 0x54, 0xbb, 0x6f, 0x13, 0x31, + 0x18, 0x8f, 0x53, 0xaa, 0xa6, 0x4e, 0x0a, 0xc5, 0x54, 0x6a, 0x94, 0xe1, 0x52, 0x82, 0x10, 0x65, + 0xa8, 0x8f, 0x56, 0x80, 0x2a, 0x24, 0x84, 0x74, 0x3c, 0xc4, 0xb3, 0x85, 0x2b, 0x2c, 0x88, 0x01, + 0xe7, 0xce, 0x5c, 0x4c, 0x72, 0xe7, 0xe8, 0xce, 0x17, 0x91, 0x0d, 0xb1, 0x20, 0x31, 0xb1, 0xf0, + 0xdf, 0xc0, 0xde, 0x8d, 0x2e, 0x48, 0x9d, 0x5a, 0x1a, 0xfe, 0x0b, 0x26, 0x64, 0xdf, 0x23, 0x6e, + 0xd3, 0xb4, 0x61, 0x8b, 0x7d, 0xbf, 0xc7, 0xf7, 0xfd, 0x3e, 0x7f, 0x81, 0x57, 0xda, 0xeb, 0x11, + 0x66, 0xdc, 0x24, 0x5d, 0x66, 0x06, 0xdc, 0xa5, 0x66, 0x6f, 0xb5, 0x49, 0x05, 0x59, 0x35, 0x3d, + 0x1a, 0xd0, 0x90, 0x08, 0xea, 0xe2, 0x6e, 0xc8, 0x05, 0x47, 0x8b, 0x09, 0x10, 0x93, 0x2e, 0xc3, + 0x12, 0x88, 0x53, 0x60, 0x6d, 0xc5, 0x63, 0xa2, 0x15, 0x37, 0xb1, 0xc3, 0x7d, 0xd3, 0xe3, 0x1e, + 0x37, 0x15, 0xbe, 0x19, 0xbf, 0x53, 0x27, 0x75, 0x50, 0xbf, 0x12, 0x9d, 0x5a, 0x43, 0x33, 0x74, + 0x78, 0x28, 0x0d, 0x8f, 0x7a, 0xd5, 0xae, 0x0f, 0x31, 0x3e, 0x71, 0x5a, 0x2c, 0xa0, 0x61, 0xdf, + 0xec, 0xb6, 0x3d, 0x45, 0x0a, 0x69, 0xc4, 0xe3, 0xd0, 0xa1, 0xff, 0xc5, 0x8a, 0x4c, 0x9f, 0x0a, + 0x72, 0x9c, 0x97, 0x39, 0x8e, 0x15, 0xc6, 0x81, 0x60, 0xfe, 0xa8, 0xcd, 0xcd, 0xd3, 0x08, 0x91, + 0xd3, 0xa2, 0x3e, 0x39, 0xca, 0x6b, 0xfc, 0x2c, 0xc2, 0xd2, 0x66, 0x8f, 0x86, 0x2d, 0x4a, 0x5c, + 0xf4, 0x0b, 0xc0, 0x52, 0x97, 0xbb, 0x0f, 0xd8, 0x07, 0xea, 0x56, 0xc1, 0xd2, 0xd4, 0x72, 0x79, + 0xcd, 0xc4, 0x63, 0x12, 0xc6, 0x19, 0x0b, 0x3f, 0x4f, 0x19, 0xf7, 0x03, 0x11, 0xf6, 0xad, 0xcf, + 0x60, 0x7b, 0xaf, 0x5e, 0x18, 0xec, 0xd5, 0x4b, 0xd9, 0xfd, 0xdf, 0xbd, 0x7a, 0x7d, 0x34, 0x5e, + 0x6c, 0xa7, 0x89, 0x3d, 0x65, 0x91, 0xf8, 0xb4, 0x7f, 0x22, 0x64, 0x83, 0xf8, 0xf4, 0xcb, 0x7e, + 0x7d, 0x65, 0x92, 0x01, 0xe0, 0x17, 0x31, 0x09, 0x04, 0x13, 0x7d, 0x3b, 0x6f, 0xa5, 0xd6, 0x86, + 0x73, 0x87, 0x8a, 0x44, 0xf3, 0x70, 0xaa, 0x4d, 0xfb, 0x55, 0xb0, 0x04, 0x96, 0x67, 0x6d, 0xf9, + 0x13, 0xdd, 0x83, 0xd3, 0x3d, 0xd2, 0x89, 0x69, 0xb5, 0xb8, 0x04, 0x96, 0xcb, 0x6b, 0x58, 0x6b, + 0x3b, 0xf7, 0xc2, 0xdd, 0xb6, 0xa7, 0x72, 0x18, 0xf5, 0x4a, 0xc8, 0xb7, 0x8a, 0xeb, 0xa0, 0xf1, + 0xa3, 0x08, 0x2b, 0x76, 0x12, 0xfa, 0xdd, 0x0e, 0x89, 0x22, 0xf4, 0x16, 0x96, 0xe4, 0x98, 0x5d, + 0x22, 0x88, 0x72, 0x2c, 0xaf, 0x5d, 0x3b, 0x49, 0x3d, 0xc2, 0x12, 0x8d, 0x7b, 0xab, 0x78, 0xb3, + 0xf9, 0x9e, 0x3a, 0xe2, 0x19, 0x15, 0xc4, 0x42, 0x69, 0xa8, 0x70, 0x78, 0x67, 0xe7, 0xaa, 0xe8, + 0x2a, 0x9c, 0x69, 0x91, 0xc0, 0xed, 0xd0, 0x50, 0x95, 0x3f, 0x6b, 0x9d, 0x4b, 0xe1, 0x33, 0x0f, + 0x93, 0x6b, 0x3b, 0xfb, 0x8e, 0x9e, 0xc0, 0x12, 0x4f, 0x07, 0x57, 0x9d, 0x52, 0xc5, 0x5c, 0x3c, + 0x75, 0xc2, 0x56, 0x45, 0x8e, 0x33, 0x3b, 0xd9, 0xb9, 0x00, 0xda, 0x82, 0x50, 0x3e, 0x2b, 0x37, + 0xee, 0xb0, 0xc0, 0xab, 0x9e, 0x51, 0x72, 0x97, 0xc6, 0xca, 0x6d, 0xe5, 0x50, 0xeb, 0xac, 0x6c, + 0x65, 0x78, 0xb6, 0x35, 0x99, 0xc6, 0x77, 0x00, 0xe7, 0xf5, 0xfc, 0xe4, 0xfb, 0x40, 0x6f, 0x46, + 0x32, 0xc4, 0x93, 0x65, 0x28, 0xd9, 0x2a, 0xc1, 0xf9, 0xec, 0x59, 0x66, 0x37, 0x5a, 0x7e, 0x8f, + 0xe1, 0x34, 0x13, 0xd4, 0x8f, 0xaa, 0x45, 0xf5, 0xe6, 0x2f, 0x8f, 0x6d, 0x41, 0xaf, 0xcb, 0x9a, + 0x4b, 0x15, 0xa7, 0x1f, 0x49, 0xae, 0x9d, 0x48, 0x34, 0xbe, 0x15, 0xa1, 0xd6, 0x19, 0xe2, 0xb0, + 0x22, 0x15, 0xb6, 0x68, 0x87, 0x3a, 0x82, 0x87, 0xe9, 0x56, 0xdd, 0x98, 0x20, 0x24, 0xbc, 0xa1, + 0xf1, 0x92, 0xdd, 0x5a, 0x48, 0x1d, 0x2b, 0xfa, 0x27, 0xfb, 0x90, 0x01, 0x7a, 0x05, 0xcb, 0x82, + 0x77, 0xe4, 0x8e, 0x33, 0x1e, 0x64, 0x1d, 0x19, 0xba, 0x9f, 0xdc, 0x2e, 0x19, 0xcd, 0xcb, 0x1c, + 0x66, 0x5d, 0x48, 0x85, 0xcb, 0xc3, 0xbb, 0xc8, 0xd6, 0x75, 0x6a, 
0x77, 0xe0, 0xf9, 0x91, 0x7a, + 0x8e, 0x59, 0xa3, 0x05, 0x7d, 0x8d, 0x66, 0xb5, 0xb5, 0xb0, 0x6e, 0x6f, 0x1f, 0x18, 0x85, 0x9d, + 0x03, 0xa3, 0xb0, 0x7b, 0x60, 0x14, 0x3e, 0x0e, 0x0c, 0xb0, 0x3d, 0x30, 0xc0, 0xce, 0xc0, 0x00, + 0xbb, 0x03, 0x03, 0xfc, 0x1e, 0x18, 0xe0, 0xeb, 0x1f, 0xa3, 0xf0, 0x7a, 0x71, 0xcc, 0x1f, 0xff, + 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x67, 0x22, 0x03, 0x12, 0x06, 0x00, 0x00, } func (m *Overhead) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto index 3c1e5bbbd..497027e03 100644 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -28,13 +28,13 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/node/v1beta1"; // Overhead structure represents the resource overhead associated with running a pod. message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,9 +47,9 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -57,18 +57,17 @@ message RuntimeClass { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. optional string handler = 2; - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional optional Overhead overhead = 3; - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -81,9 +80,9 @@ message RuntimeClassList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of schema objects. + // items is a list of schema objects. repeated RuntimeClass items = 2; } @@ -104,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go index c545abf18..74ecca26a 100644 --- a/vendor/k8s.io/api/node/v1beta1/types.go +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -36,11 +36,12 @@ import ( // https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class type RuntimeClass struct { metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Handler specifies the underlying runtime and configuration that the CRI + // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values // are specific to the node & CRI configuration. It is assumed that all // handlers are available on every node, and handlers of the same name are @@ -48,18 +49,17 @@ type RuntimeClass struct { // For example, a handler called "runc" might specify that the runc OCI // runtime (using native Linux containers) will be used to run the containers // in a pod. - // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, + // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, // and is immutable. Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - // Overhead represents the resource overhead associated with running a pod for a + // overhead represents the resource overhead associated with running a pod for a // given RuntimeClass. For more details, see // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md - // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature. // +optional Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - // Scheduling holds the scheduling constraints to ensure that pods running + // scheduling holds the scheduling constraints to ensure that pods running // with this RuntimeClass are scheduled to nodes that support it. // If scheduling is nil, this RuntimeClass is assumed to be supported by all // nodes. @@ -69,7 +69,7 @@ type RuntimeClass struct { // Overhead structure represents the resource overhead associated with running a pod. type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. + // podFixed represents the fixed resource overhead associated with running a pod. // +optional PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` } @@ -101,11 +101,12 @@ type Scheduling struct { // RuntimeClassList is a list of RuntimeClass objects. 
type RuntimeClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of schema objects. + // items is a list of schema objects. Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go index 6d8871034..086105ecc 100644 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -24,12 +24,12 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Overhead = map[string]string{ "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", + "podFixed": "podFixed represents the fixed resource overhead associated with running a pod.", } func (Overhead) SwaggerDoc() map[string]string { @@ -39,9 +39,9 @@ func (Overhead) SwaggerDoc() map[string]string { var map_RuntimeClass = map[string]string{ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "handler": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. 
It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "scheduling": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", } func (RuntimeClass) SwaggerDoc() map[string]string { @@ -51,7 +51,7 @@ func (RuntimeClass) SwaggerDoc() map[string]string { var map_RuntimeClassList = map[string]string{ "": "RuntimeClassList is a list of RuntimeClass objects.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", + "items": "items is a list of schema objects.", } func (RuntimeClassList) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go index b46af58e4..c51e02685 100644 --- a/vendor/k8s.io/api/policy/v1/doc.go +++ b/vendor/k8s.io/api/policy/v1/doc.go @@ -17,8 +17,9 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. package v1 // import "k8s.io/api/policy/v1" diff --git a/vendor/k8s.io/api/policy/v1/generated.pb.go b/vendor/k8s.io/api/policy/v1/generated.pb.go index 183db076a..dd61b7266 100644 --- a/vendor/k8s.io/api/policy/v1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
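
For contrast with v1alpha1, the node/v1beta1 hunks above keep handler, overhead, and scheduling directly on the RuntimeClass object (proto fields 2, 3, and 4) with no spec wrapper. A hedged sketch against the vendored v1beta1 types follows; the name, handler, node-selector label, and taint key are illustrative assumptions.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1beta1 "k8s.io/api/node/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Unlike v1alpha1, v1beta1 places Handler (and the optional Overhead and
	// Scheduling) directly on the RuntimeClass object rather than under a Spec.
	rc := nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"}, // illustrative name
		Handler:    "kata",
		Scheduling: &nodev1beta1.Scheduling{
			NodeSelector: map[string]string{"runtime": "kata"}, // illustrative label
			Tolerations: []corev1.Toleration{{
				Key:      "runtime", // illustrative taint key
				Operator: corev1.TolerationOpExists,
				Effect:   corev1.TaintEffectNoSchedule,
			}},
		},
	}
	fmt.Println(rc.Handler)
}
```
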
-// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1/generated.proto +// source: k8s.io/api/policy/v1/generated.proto package v1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{0} + return fileDescriptor_204bc6fa48ff56f7, []int{0} } func (m *Eviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_Eviction proto.InternalMessageInfo func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{1} + return fileDescriptor_204bc6fa48ff56f7, []int{1} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{2} + return fileDescriptor_204bc6fa48ff56f7, []int{2} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{3} + return fileDescriptor_204bc6fa48ff56f7, []int{3} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2d50488813b2d18e, []int{4} + return fileDescriptor_204bc6fa48ff56f7, []int{4} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -197,62 +197,64 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1/generated.proto", fileDescriptor_2d50488813b2d18e) + proto.RegisterFile("k8s.io/api/policy/v1/generated.proto", fileDescriptor_204bc6fa48ff56f7) } -var fileDescriptor_2d50488813b2d18e = []byte{ - // 805 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xdf, 0x8e, 0xdb, 0x44, - 0x14, 0xc6, 0xe3, 0x64, 0xb3, 0x2c, 0xd3, 0x24, 0x5a, 0x86, 0x02, 0x4b, 0x2e, 0x1c, 0x94, 0xab, - 0x05, 0xa9, 0x63, 0xb6, 0x45, 0x68, 0x85, 0x04, 0xa2, 0x6e, 0x56, 0x50, 0xd4, 0x25, 0xd5, 0x2c, - 0x08, 0x09, 0x81, 0xc4, 0xc4, 0x3e, 0xcd, 0x0e, 0xb1, 0x3d, 0xd6, 0xcc, 0xd8, 0x34, 0x57, 0xf0, - 0x08, 0xbc, 0x02, 0x8f, 0xc2, 0x15, 0x7b, 0x85, 0x7a, 0x59, 0x71, 0x11, 0xb1, 0xe6, 0x45, 0x90, - 0xc7, 0xce, 0x1f, 0x27, 0x59, 0x35, 0xe5, 0x82, 0x3b, 0xcf, 0x99, 0xf3, 0xfd, 0x8e, 0xcf, 0x37, - 0x67, 0x06, 0x7d, 0x3c, 0x39, 0x55, 0x84, 0x0b, 0x67, 0x92, 0x8c, 0x40, 0x46, 0xa0, 0x41, 0x39, - 0x29, 0x44, 0xbe, 0x90, 0x4e, 0xb9, 0xc1, 0x62, 0xee, 0xc4, 0x22, 0xe0, 0xde, 0xd4, 0x49, 0x4f, - 0x9c, 0x31, 
0x44, 0x20, 0x99, 0x06, 0x9f, 0xc4, 0x52, 0x68, 0x81, 0x6f, 0x17, 0x59, 0x84, 0xc5, - 0x9c, 0x14, 0x59, 0x24, 0x3d, 0xe9, 0xde, 0x19, 0x73, 0x7d, 0x99, 0x8c, 0x88, 0x27, 0x42, 0x67, - 0x2c, 0xc6, 0xc2, 0x31, 0xc9, 0xa3, 0xe4, 0x89, 0x59, 0x99, 0x85, 0xf9, 0x2a, 0x20, 0xdd, 0x0f, - 0x96, 0xa5, 0x42, 0xe6, 0x5d, 0xf2, 0x08, 0xe4, 0xd4, 0x89, 0x27, 0xe3, 0x3c, 0xa0, 0x9c, 0x10, - 0x34, 0xdb, 0x52, 0xba, 0xeb, 0xdc, 0xa4, 0x92, 0x49, 0xa4, 0x79, 0x08, 0x1b, 0x82, 0x0f, 0x5f, - 0x24, 0x50, 0xde, 0x25, 0x84, 0x6c, 0x43, 0x77, 0xef, 0x26, 0x5d, 0xa2, 0x79, 0xe0, 0xf0, 0x48, - 0x2b, 0x2d, 0xd7, 0x45, 0xfd, 0xbf, 0x2c, 0x74, 0x70, 0x96, 0x72, 0x4f, 0x73, 0x11, 0xe1, 0x1f, - 0xd0, 0x41, 0xde, 0x85, 0xcf, 0x34, 0x3b, 0xb2, 0xde, 0xb1, 0x8e, 0x6f, 0xdd, 0x7d, 0x9f, 0x2c, - 0x8d, 0x5b, 0x40, 0x49, 0x3c, 0x19, 0xe7, 0x01, 0x45, 0xf2, 0x6c, 0x92, 0x9e, 0x90, 0xe1, 0xe8, - 0x47, 0xf0, 0xf4, 0x39, 0x68, 0xe6, 0xe2, 0xab, 0x59, 0xaf, 0x96, 0xcd, 0x7a, 0x68, 0x19, 0xa3, - 0x0b, 0x2a, 0x0e, 0x50, 0xdb, 0x87, 0x00, 0x34, 0x0c, 0xe3, 0xbc, 0xa2, 0x3a, 0xaa, 0x9b, 0x32, - 0xf7, 0x76, 0x2b, 0x33, 0x58, 0x95, 0xba, 0xaf, 0x65, 0xb3, 0x5e, 0xbb, 0x12, 0xa2, 0x55, 0x78, - 0xff, 0xb7, 0x3a, 0x7a, 0xfd, 0xb1, 0xf0, 0x07, 0x5c, 0xc9, 0xc4, 0x84, 0xdc, 0xc4, 0x1f, 0x83, - 0xfe, 0x1f, 0xfa, 0x1c, 0xa2, 0x3d, 0x15, 0x83, 0x57, 0xb6, 0x77, 0x87, 0x6c, 0x1b, 0x3f, 0xb2, - 0xe5, 0xd7, 0x2e, 0x62, 0xf0, 0xdc, 0x56, 0x89, 0xde, 0xcb, 0x57, 0xd4, 0x80, 0xf0, 0x37, 0x68, - 0x5f, 0x69, 0xa6, 0x13, 0x75, 0xd4, 0x30, 0x48, 0x67, 0x77, 0xa4, 0x91, 0xb9, 0x9d, 0x12, 0xba, - 0x5f, 0xac, 0x69, 0x89, 0xeb, 0xff, 0x61, 0xa1, 0xb7, 0xb6, 0xa8, 0x1e, 0x71, 0xa5, 0xf1, 0x77, - 0x1b, 0x3e, 0x91, 0xdd, 0x7c, 0xca, 0xd5, 0xc6, 0xa5, 0xc3, 0xb2, 0xea, 0xc1, 0x3c, 0xb2, 0xe2, - 0xd1, 0x97, 0xa8, 0xc9, 0x35, 0x84, 0xf9, 0x0c, 0x34, 0x8e, 0x6f, 0xdd, 0x7d, 0x77, 0xe7, 0x8e, - 0xdc, 0x76, 0x49, 0x6d, 0x3e, 0xcc, 0xf5, 0xb4, 0xc0, 0xf4, 0xff, 0xac, 0x6f, 0xed, 0x24, 0x37, - 0x11, 0x3f, 0x41, 0xad, 0x90, 0x47, 0xf7, 0x53, 0xc6, 0x03, 0x36, 0x0a, 0xe0, 0x85, 0xa7, 0x9e, - 0x5f, 0x19, 0x52, 0x5c, 0x19, 0xf2, 0x30, 0xd2, 0x43, 0x79, 0xa1, 0x25, 0x8f, 0xc6, 0xee, 0x61, - 0x36, 0xeb, 0xb5, 0xce, 0x57, 0x48, 0xb4, 0xc2, 0xc5, 0xdf, 0xa3, 0x03, 0x05, 0x01, 0x78, 0x5a, - 0xc8, 0x97, 0x1b, 0xed, 0x47, 0x6c, 0x04, 0xc1, 0x45, 0x29, 0x75, 0x5b, 0xb9, 0x65, 0xf3, 0x15, - 0x5d, 0x20, 0x71, 0x80, 0x3a, 0x21, 0x7b, 0xfa, 0x75, 0xc4, 0x16, 0x8d, 0x34, 0xfe, 0x63, 0x23, - 0x38, 0x9b, 0xf5, 0x3a, 0xe7, 0x15, 0x16, 0x5d, 0x63, 0xf7, 0x7f, 0x6f, 0xa2, 0xb7, 0x6f, 0x1c, - 0x28, 0xfc, 0x05, 0xc2, 0x62, 0xa4, 0x40, 0xa6, 0xe0, 0x7f, 0x56, 0x3c, 0x2a, 0x5c, 0x44, 0xc6, - 0xd8, 0x86, 0xdb, 0x2d, 0x0f, 0x08, 0x0f, 0x37, 0x32, 0xe8, 0x16, 0x15, 0xfe, 0x19, 0xb5, 0xfd, - 0xa2, 0x0a, 0xf8, 0x8f, 0x85, 0x3f, 0x1f, 0x09, 0xf7, 0x25, 0x87, 0x9c, 0x0c, 0x56, 0x21, 0x67, - 0x91, 0x96, 0x53, 0xf7, 0x8d, 0xf2, 0x57, 0xda, 0x95, 0x3d, 0x5a, 0xad, 0x97, 0x37, 0xe3, 0x2f, - 0x90, 0xea, 0x7e, 0x10, 0x88, 0x9f, 0xc0, 0x37, 0xe6, 0x36, 0x97, 0xcd, 0x0c, 0x36, 0x32, 0xe8, - 0x16, 0x15, 0xfe, 0x04, 0x75, 0xbc, 0x44, 0x4a, 0x88, 0xf4, 0xe7, 0xc0, 0x02, 0x7d, 0x39, 0x3d, - 0xda, 0x33, 0x9c, 0x37, 0x4b, 0x4e, 0xe7, 0x41, 0x65, 0x97, 0xae, 0x65, 0xe7, 0x7a, 0x1f, 0x14, - 0x97, 0xe0, 0xcf, 0xf5, 0xcd, 0xaa, 0x7e, 0x50, 0xd9, 0xa5, 0x6b, 0xd9, 0xf8, 0x14, 0xb5, 0xe0, - 0x69, 0x0c, 0xde, 0xdc, 0xcb, 0x7d, 0xa3, 0xbe, 0x5d, 0xaa, 0x5b, 0x67, 0x2b, 0x7b, 0xb4, 0x92, - 0x89, 0x3d, 0x84, 0x3c, 0x11, 0xf9, 0xbc, 0x78, 0x9a, 0x5f, 0x31, 0x67, 0xe0, 0xec, 0x36, 0xbf, - 0x0f, 0xe6, 0xba, 0xe5, 0xc3, 0xb8, 
0x08, 0x29, 0xba, 0x82, 0xed, 0x06, 0x08, 0x6f, 0x1e, 0x13, - 0x3e, 0x44, 0x8d, 0x09, 0x4c, 0xcd, 0xf8, 0xbc, 0x4a, 0xf3, 0x4f, 0xfc, 0x29, 0x6a, 0xa6, 0x2c, - 0x48, 0xa0, 0xbc, 0x47, 0xef, 0xed, 0xf6, 0x1f, 0x5f, 0xf1, 0x10, 0x68, 0x21, 0xfc, 0xa8, 0x7e, - 0x6a, 0xb9, 0xc7, 0x57, 0xd7, 0x76, 0xed, 0xd9, 0xb5, 0x5d, 0x7b, 0x7e, 0x6d, 0xd7, 0x7e, 0xc9, - 0x6c, 0xeb, 0x2a, 0xb3, 0xad, 0x67, 0x99, 0x6d, 0x3d, 0xcf, 0x6c, 0xeb, 0xef, 0xcc, 0xb6, 0x7e, - 0xfd, 0xc7, 0xae, 0x7d, 0x5b, 0x4f, 0x4f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xce, 0x1b, 0x9d, - 0x9f, 0x62, 0x08, 0x00, 0x00, +var fileDescriptor_204bc6fa48ff56f7 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x4d, 0x8f, 0xdb, 0x44, + 0x18, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x4e, 0x93, 0x68, 0x19, 0x16, 0x58, 0x72, 0x70, 0xaa, 0x88, + 0xc3, 0x82, 0xd4, 0x31, 0xdb, 0x22, 0xb4, 0xea, 0x01, 0xb5, 0x6e, 0x56, 0x50, 0xd4, 0x25, 0xab, + 0xd9, 0x56, 0x48, 0x08, 0x24, 0x26, 0xf6, 0xd3, 0x64, 0x58, 0xdb, 0x63, 0x79, 0xc6, 0xa1, 0x39, + 0xd1, 0x8f, 0xc0, 0x57, 0xe0, 0xa3, 0x70, 0x62, 0x8f, 0xe5, 0x56, 0x71, 0x88, 0x58, 0xf3, 0x2d, + 0x38, 0x21, 0x8f, 0x9d, 0x17, 0x27, 0x0e, 0xcd, 0x72, 0xe8, 0xcd, 0xf3, 0xcc, 0xf3, 0xff, 0x3d, + 0xf3, 0xbc, 0xcc, 0x24, 0xe8, 0xc3, 0x8b, 0x63, 0x49, 0xb8, 0xb0, 0x58, 0xc8, 0xad, 0x50, 0x78, + 0xdc, 0x19, 0x5b, 0xa3, 0x23, 0x6b, 0x00, 0x01, 0x44, 0x4c, 0x81, 0x4b, 0xc2, 0x48, 0x28, 0x81, + 0xf7, 0x33, 0x2f, 0xc2, 0x42, 0x4e, 0x32, 0x2f, 0x32, 0x3a, 0x6a, 0xdd, 0x1e, 0x70, 0x35, 0x8c, + 0xfb, 0xc4, 0x11, 0xbe, 0x35, 0x10, 0x03, 0x61, 0x69, 0xe7, 0x7e, 0xfc, 0x4c, 0xaf, 0xf4, 0x42, + 0x7f, 0x65, 0x90, 0xd6, 0xa7, 0xf3, 0x50, 0x3e, 0x73, 0x86, 0x3c, 0x80, 0x68, 0x6c, 0x85, 0x17, + 0x83, 0xd4, 0x20, 0x2d, 0x1f, 0x14, 0x2b, 0x09, 0xdd, 0xb2, 0xd6, 0xa9, 0xa2, 0x38, 0x50, 0xdc, + 0x87, 0x15, 0xc1, 0x67, 0xaf, 0x13, 0x48, 0x67, 0x08, 0x3e, 0x5b, 0xd1, 0xdd, 0x5d, 0xa7, 0x8b, + 0x15, 0xf7, 0x2c, 0x1e, 0x28, 0xa9, 0xa2, 0x65, 0x51, 0xe7, 0x4f, 0x03, 0xed, 0x9e, 0x8c, 0xb8, + 0xa3, 0xb8, 0x08, 0xf0, 0x0f, 0x68, 0x37, 0xcd, 0xc2, 0x65, 0x8a, 0x1d, 0x18, 0xb7, 0x8c, 0xc3, + 0x9b, 0x77, 0x3e, 0x21, 0xf3, 0xc2, 0xcd, 0xa0, 0x24, 0xbc, 0x18, 0xa4, 0x06, 0x49, 0x52, 0x6f, + 0x32, 0x3a, 0x22, 0xbd, 0xfe, 0x8f, 0xe0, 0xa8, 0x53, 0x50, 0xcc, 0xc6, 0x97, 0x93, 0x76, 0x25, + 0x99, 0xb4, 0xd1, 0xdc, 0x46, 0x67, 0x54, 0xec, 0xa1, 0x86, 0x0b, 0x1e, 0x28, 0xe8, 0x85, 0x69, + 0x44, 0x79, 0xb0, 0xa5, 0xc3, 0xdc, 0xdd, 0x2c, 0x4c, 0x77, 0x51, 0x6a, 0xbf, 0x9d, 0x4c, 0xda, + 0x8d, 0x82, 0x89, 0x16, 0xe1, 0x9d, 0x5f, 0xb7, 0xd0, 0x3b, 0x67, 0xc2, 0xed, 0x72, 0x19, 0xc5, + 0xda, 0x64, 0xc7, 0xee, 0x00, 0xd4, 0x1b, 0xc8, 0xb3, 0x87, 0xb6, 0x65, 0x08, 0x4e, 0x9e, 0xde, + 0x6d, 0x52, 0x36, 0x7e, 0xa4, 0xe4, 0x68, 0xe7, 0x21, 0x38, 0x76, 0x3d, 0x47, 0x6f, 0xa7, 0x2b, + 0xaa, 0x41, 0xf8, 0x1b, 0xb4, 0x23, 0x15, 0x53, 0xb1, 0x3c, 0xa8, 0x6a, 0xa4, 0xb5, 0x39, 0x52, + 0xcb, 0xec, 0x66, 0x0e, 0xdd, 0xc9, 0xd6, 0x34, 0xc7, 0x75, 0x7e, 0x37, 0xd0, 0xfb, 0x25, 0xaa, + 0xc7, 0x5c, 0x2a, 0xfc, 0xdd, 0x4a, 0x9d, 0xc8, 0x66, 0x75, 0x4a, 0xd5, 0xba, 0x4a, 0x7b, 0x79, + 0xd4, 0xdd, 0xa9, 0x65, 0xa1, 0x46, 0x5f, 0xa3, 0x1a, 0x57, 0xe0, 0xa7, 0x33, 0x50, 0x3d, 0xbc, + 0x79, 0xe7, 0xa3, 0x8d, 0x33, 0xb2, 0x1b, 0x39, 0xb5, 0xf6, 0x28, 0xd5, 0xd3, 0x0c, 0xd3, 0xf9, + 0xa3, 0x5a, 0x9a, 0x49, 0x5a, 0x44, 0xfc, 0x0c, 0xd5, 0x7d, 0x1e, 0x3c, 0x18, 0x31, 0xee, 0xb1, + 0xbe, 0x07, 0xaf, 0xed, 0x7a, 0x7a, 0x65, 0x48, 0x76, 0x65, 0xc8, 0xa3, 0x40, 0xf5, 0xa2, 0x73, + 0x15, 0xf1, 0x60, 0x60, 0xef, 
0x25, 0x93, 0x76, 0xfd, 0x74, 0x81, 0x44, 0x0b, 0x5c, 0xfc, 0x3d, + 0xda, 0x95, 0xe0, 0x81, 0xa3, 0x44, 0x74, 0xbd, 0xd1, 0x7e, 0xcc, 0xfa, 0xe0, 0x9d, 0xe7, 0x52, + 0xbb, 0x9e, 0x96, 0x6c, 0xba, 0xa2, 0x33, 0x24, 0xf6, 0x50, 0xd3, 0x67, 0xcf, 0x9f, 0x06, 0x6c, + 0x96, 0x48, 0xf5, 0x7f, 0x26, 0x82, 0x93, 0x49, 0xbb, 0x79, 0x5a, 0x60, 0xd1, 0x25, 0x36, 0x7e, + 0x61, 0xa0, 0x56, 0x1c, 0x0c, 0x81, 0x79, 0x6a, 0x38, 0x3e, 0x13, 0xee, 0xf4, 0x9d, 0x38, 0xd3, + 0xcd, 0x39, 0xd8, 0xbe, 0x65, 0x1c, 0xde, 0xb0, 0xef, 0x27, 0x93, 0x76, 0xeb, 0xe9, 0x5a, 0xaf, + 0x7f, 0x26, 0x6d, 0x73, 0xfd, 0xee, 0x93, 0x71, 0x08, 0xf4, 0x3f, 0x62, 0x74, 0x7e, 0xab, 0xa1, + 0x0f, 0xd6, 0xce, 0x34, 0xfe, 0x0a, 0x61, 0xd1, 0x97, 0x10, 0x8d, 0xc0, 0xfd, 0x22, 0x7b, 0xd7, + 0xb8, 0x08, 0x74, 0x6f, 0xab, 0x76, 0x2b, 0x9f, 0x11, 0xdc, 0x5b, 0xf1, 0xa0, 0x25, 0x2a, 0xfc, + 0x33, 0x6a, 0xb8, 0x59, 0x14, 0x70, 0xcf, 0x84, 0x3b, 0x9d, 0x4a, 0xfb, 0x9a, 0xf7, 0x8c, 0x74, + 0x17, 0x21, 0x27, 0x81, 0x8a, 0xc6, 0xf6, 0xbb, 0xf9, 0x51, 0x1a, 0x85, 0x3d, 0x5a, 0x8c, 0x97, + 0x26, 0xe3, 0xce, 0x90, 0xf2, 0x81, 0xe7, 0x89, 0x9f, 0xc0, 0xd5, 0xfd, 0xad, 0xcd, 0x93, 0xe9, + 0xae, 0x78, 0xd0, 0x12, 0x15, 0xfe, 0x1c, 0x35, 0x9d, 0x38, 0x8a, 0x20, 0x50, 0x5f, 0x66, 0x95, + 0xd5, 0xcd, 0xaa, 0xd9, 0xef, 0xe5, 0x9c, 0xe6, 0xc3, 0xc2, 0x2e, 0x5d, 0xf2, 0x4e, 0xf5, 0x2e, + 0x48, 0x1e, 0x81, 0x3b, 0xd5, 0xd7, 0x8a, 0xfa, 0x6e, 0x61, 0x97, 0x2e, 0x79, 0xe3, 0x63, 0x54, + 0x87, 0xe7, 0x21, 0x38, 0xd3, 0x5a, 0xee, 0x68, 0xf5, 0x7e, 0xae, 0xae, 0x9f, 0x2c, 0xec, 0xd1, + 0x82, 0x27, 0x76, 0x10, 0x72, 0x44, 0xe0, 0xf2, 0xec, 0xd7, 0xe1, 0x2d, 0xdd, 0x03, 0x6b, 0xb3, + 0x2b, 0xf4, 0x70, 0xaa, 0x9b, 0xbf, 0xcd, 0x33, 0x93, 0xa4, 0x0b, 0xd8, 0x96, 0x87, 0xf0, 0x6a, + 0x9b, 0xf0, 0x1e, 0xaa, 0x5e, 0xc0, 0x58, 0x8f, 0xcf, 0x0d, 0x9a, 0x7e, 0xe2, 0xfb, 0xa8, 0x36, + 0x62, 0x5e, 0x0c, 0xf9, 0x55, 0xfe, 0x78, 0xb3, 0x73, 0x3c, 0xe1, 0x3e, 0xd0, 0x4c, 0x78, 0x6f, + 0xeb, 0xd8, 0xb0, 0xef, 0x5d, 0x5e, 0x99, 0x95, 0x97, 0x57, 0x66, 0xe5, 0xd5, 0x95, 0x59, 0x79, + 0x91, 0x98, 0xc6, 0x65, 0x62, 0x1a, 0x2f, 0x13, 0xd3, 0x78, 0x95, 0x98, 0xc6, 0x5f, 0x89, 0x69, + 0xfc, 0xf2, 0xb7, 0x59, 0xf9, 0x76, 0xbf, 0xec, 0x7f, 0xcc, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x0f, 0x42, 0xd2, 0x33, 0xde, 0x08, 0x00, 0x00, } func (m *Eviction) Marshal() (dAtA []byte, err error) { @@ -420,6 +422,13 @@ func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.UnhealthyPodEvictionPolicy != nil { + i -= len(*m.UnhealthyPodEvictionPolicy) + copy(dAtA[i:], *m.UnhealthyPodEvictionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UnhealthyPodEvictionPolicy))) + i-- + dAtA[i] = 0x22 + } if m.MaxUnavailable != nil { { size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) @@ -616,6 +625,10 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { l = m.MaxUnavailable.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.UnhealthyPodEvictionPolicy != nil { + l = len(*m.UnhealthyPodEvictionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -701,6 +714,7 @@ func (this *PodDisruptionBudgetSpec) String() string { `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `UnhealthyPodEvictionPolicy:` + 
valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, `}`, }, "") return s @@ -1266,6 +1280,39 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) + m.UnhealthyPodEvictionPolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto index 5b7984235..57128e811 100644 --- a/vendor/k8s.io/api/policy/v1/generated.proto +++ b/vendor/k8s.io/api/policy/v1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/policy/v1"; // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is @@ -35,11 +35,11 @@ option go_package = "v1"; message Eviction { // ObjectMeta describes the pod that is being evicted. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -47,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -63,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of PodDisruptionBudgets repeated PodDisruptionBudget items = 2; @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". 
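
A small reading aid for the generated marshal/unmarshal hunks in generated.pb.go above: the added unhealthyPodEvictionPolicy field is number 4 with the length-delimited wire type, so its protobuf key byte is (4<<3)|2 = 0x22, which is exactly the literal that MarshalToSizedBuffer writes and that the new `case 4` branch expects. A tiny self-contained check of that arithmetic:

```go
package main

import "fmt"

func main() {
	const fieldNumber, wireType = 4, 2 // field 4, length-delimited (string)
	// Prints "key byte: 0x22", matching dAtA[i] = 0x22 in the generated code.
	fmt.Printf("key byte: %#x\n", fieldNumber<<3|wireType)
}
```
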
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -84,14 +84,42 @@ message PodDisruptionBudgetSpec { // all pods within the namespace. // +patchStrategy=replace // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). + // +optional + optional string unhealthyPodEvictionPolicy = 4; } // PodDisruptionBudgetStatus represents information about the status of a @@ -114,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. // +optional - map disruptedPods = 2; + map disruptedPods = 2; // Number of pod disruptions that are currently allowed. optional int32 disruptionsAllowed = 3; @@ -146,6 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go index 4a03696f0..f05367ebe 100644 --- a/vendor/k8s.io/api/policy/v1/types.go +++ b/vendor/k8s.io/api/policy/v1/types.go @@ -47,8 +47,56 @@ type PodDisruptionBudgetSpec struct { // by specifying 0. 
This is a mutually exclusive setting with "minAvailable". // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). + // +optional + UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } +// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods +// should be considered for eviction. +// +enum +type UnhealthyPodEvictionPolicyType string + +const ( + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget" + + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow" +) + // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. 
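
The unhealthyPodEvictionPolicy field and its two constants defined above are the main semantic addition the policy/v1 bump carries; the rest of these hunks are regenerated plumbing. A minimal, hypothetical sketch of a client setting the new field through these vendored types; the object name, selector labels, and percentage are made-up values.

```go
package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// AlwaysAllow lets running-but-not-yet-Ready pods be evicted even when the
	// budget would otherwise block it; leaving the field nil keeps the
	// IfHealthyBudget behavior.
	policy := policyv1.AlwaysAllow
	minAvailable := intstr.FromString("50%")

	pdb := policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "frontend-pdb"}, // illustrative name
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "frontend"}, // illustrative selector
			},
			UnhealthyPodEvictionPolicy: &policy, // new field in this vendored version
		},
	}
	fmt.Println(*pdb.Spec.UnhealthyPodEvictionPolicy)
}
```
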
type PodDisruptionBudgetStatus struct { @@ -122,6 +170,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { @@ -140,6 +189,7 @@ type PodDisruptionBudget struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { @@ -155,6 +205,7 @@ type PodDisruptionBudgetList struct { // +genclient // +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.22 // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go index 3208392e8..799b0794a 100644 --- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Eviction = map[string]string{ @@ -59,10 +59,11 @@ func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { } var map_PodDisruptionBudgetSpec = map[string]string{ - "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", - "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", - "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", - "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", + "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. 
This is a mutually exclusive setting with \"minAvailable\".", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go index 485e1c938..74b4f3a3a 100644 --- a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go @@ -137,6 +137,11 @@ func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) { *out = new(intstr.IntOrString) **out = **in } + if in.UnhealthyPodEvictionPolicy != nil { + in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy + *out = new(UnhealthyPodEvictionPolicyType) + **out = **in + } return } diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..d6663b923 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
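A hedged sketch of how a consumer might use the (major, minor) pair returned by the generated APILifecycleIntroduced methods that follow; the helper and its server-version arguments are illustrative assumptions, not part of the generated code.

package example

// introducedOnOrBefore reports whether an API type introduced in release
// introducedMajor.introducedMinor can be expected on a cluster running
// serverMajor.serverMinor (e.g. policy/v1 PodDisruptionBudget returns 1, 21).
func introducedOnOrBefore(introducedMajor, introducedMinor, serverMajor, serverMinor int) bool {
	if serverMajor != introducedMajor {
		return serverMajor > introducedMajor
	}
	return serverMinor >= introducedMinor
}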
+func (in *Eviction) APILifecycleIntroduced() (major, minor int) { + return 1, 22 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodDisruptionBudget) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodDisruptionBudgetList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 9e9c7d13a..76da54b4c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -20,6 +20,6 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// they aren't all here, are PodDisruptionBudget, // NetworkPolicy, etc. package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index 9cce671df..c3845e994 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto +// source: k8s.io/api/policy/v1beta1/generated.proto package v1beta1 @@ -26,8 +26,6 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -49,94 +47,10 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{3} + return fileDescriptor_68b366237812cc96, []int{0} } func (m *Eviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,94 +75,10 @@ func (m *Eviction) XXX_DiscardUnknown() { var xxx_messageInfo_Eviction proto.InternalMessageInfo -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{4} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{5} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{6} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{7} + return fileDescriptor_68b366237812cc96, []int{1} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{8} + return fileDescriptor_68b366237812cc96, []int{2} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{9} + return fileDescriptor_68b366237812cc96, []int{3} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -332,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func 
(*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{10} + return fileDescriptor_68b366237812cc96, []int{4} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,381 +187,77 @@ func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{11} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{12} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{13} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{14} -} -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) -} -func (m *RunAsGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{15} -} -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) -} -func (m *RunAsUserStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{16} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{17} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo - -func (m *SupplementalGroupsStrategyOptions) 
Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{18} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo - func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.policy.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.policy.v1beta1.IDRange") proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) + proto.RegisterFile("k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_68b366237812cc96) +} + +var fileDescriptor_68b366237812cc96 = []byte{ + // 843 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x4d, 0x8f, 0xdb, 0x44, + 0x18, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x0e, 0x49, 
0xb4, 0x0c, 0x6f, 0xbb, 0x39, 0x38, 0x55, 0x4e, + 0x05, 0x89, 0x31, 0xdb, 0x56, 0x68, 0xc5, 0x01, 0x5a, 0x37, 0xab, 0x52, 0xd4, 0xd5, 0xae, 0x26, + 0xdb, 0x0b, 0x2a, 0x12, 0x13, 0xfb, 0xa9, 0x33, 0xc4, 0xf6, 0x58, 0x9e, 0x71, 0x68, 0x6e, 0x3d, + 0xf0, 0x01, 0xf8, 0x1e, 0x7c, 0x10, 0xf6, 0xc0, 0xa1, 0xdc, 0x2a, 0x0e, 0x11, 0x6b, 0xbe, 0x05, + 0x27, 0xe4, 0xb1, 0xf3, 0xe2, 0xbc, 0xd0, 0xb4, 0x07, 0x6e, 0x9e, 0x67, 0x9e, 0xff, 0xef, 0x99, + 0xe7, 0x65, 0x26, 0x41, 0x9f, 0x0c, 0x4f, 0x24, 0xe1, 0xc2, 0x62, 0x11, 0xb7, 0x22, 0xe1, 0x73, + 0x67, 0x6c, 0x8d, 0x8e, 0xfb, 0xa0, 0xd8, 0xb1, 0xe5, 0x41, 0x08, 0x31, 0x53, 0xe0, 0x92, 0x28, + 0x16, 0x4a, 0xe0, 0xa3, 0xdc, 0x95, 0xb0, 0x88, 0x93, 0xdc, 0x95, 0x14, 0xae, 0xad, 0xcf, 0x3c, + 0xae, 0x06, 0x49, 0x9f, 0x38, 0x22, 0xb0, 0x3c, 0xe1, 0x09, 0x4b, 0x2b, 0xfa, 0xc9, 0x33, 0xbd, + 0xd2, 0x0b, 0xfd, 0x95, 0x93, 0x5a, 0x77, 0xe7, 0x41, 0x03, 0xe6, 0x0c, 0x78, 0x08, 0xf1, 0xd8, + 0x8a, 0x86, 0x5e, 0x66, 0x90, 0x56, 0x00, 0x8a, 0x59, 0xa3, 0x95, 0xf8, 0x2d, 0x6b, 0x93, 0x2a, + 0x4e, 0x42, 0xc5, 0x03, 0x58, 0x11, 0x7c, 0xf1, 0x3a, 0x81, 0x74, 0x06, 0x10, 0xb0, 0x15, 0xdd, + 0x9d, 0x4d, 0xba, 0x44, 0x71, 0xdf, 0xe2, 0xa1, 0x92, 0x2a, 0x5e, 0x16, 0x75, 0xfe, 0x34, 0xd0, + 0xfe, 0xe9, 0x88, 0x3b, 0x8a, 0x8b, 0x10, 0xff, 0x80, 0xf6, 0xb3, 0x2c, 0x5c, 0xa6, 0xd8, 0xa1, + 0x71, 0xd3, 0xb8, 0xf5, 0xee, 0xed, 0xcf, 0xc9, 0xbc, 0x7a, 0x33, 0x28, 0x89, 0x86, 0x5e, 0x66, + 0x90, 0x24, 0xf3, 0x26, 0xa3, 0x63, 0x72, 0xde, 0xff, 0x11, 0x1c, 0x75, 0x06, 0x8a, 0xd9, 0xf8, + 0x6a, 0xd2, 0xae, 0xa4, 0x93, 0x36, 0x9a, 0xdb, 0xe8, 0x8c, 0x8a, 0x7d, 0xd4, 0x70, 0xc1, 0x07, + 0x05, 0xe7, 0x51, 0x16, 0x51, 0x1e, 0xee, 0xe8, 0x30, 0x77, 0xb6, 0x0b, 0xd3, 0x5d, 0x94, 0xda, + 0xef, 0xa5, 0x93, 0x76, 0xa3, 0x64, 0xa2, 0x65, 0x78, 0xe7, 0xd7, 0x1d, 0xf4, 0xfe, 0x85, 0x70, + 0xbb, 0x5c, 0xc6, 0x89, 0x36, 0xd9, 0x89, 0xeb, 0x81, 0xfa, 0x1f, 0xf2, 0xbc, 0x44, 0xbb, 0x32, + 0x02, 0xa7, 0x48, 0xef, 0x36, 0xd9, 0x38, 0x83, 0x64, 0xcd, 0xf9, 0x7a, 0x11, 0x38, 0x76, 0xbd, + 0xe0, 0xef, 0x66, 0x2b, 0xaa, 0x69, 0xf8, 0x29, 0xda, 0x93, 0x8a, 0xa9, 0x44, 0x1e, 0x56, 0x35, + 0xf7, 0xee, 0x1b, 0x72, 0xb5, 0xd6, 0x6e, 0x16, 0xe4, 0xbd, 0x7c, 0x4d, 0x0b, 0x66, 0xe7, 0x77, + 0x03, 0x7d, 0xbc, 0x46, 0xf5, 0x98, 0x4b, 0x85, 0x9f, 0xae, 0x54, 0x8c, 0x6c, 0x57, 0xb1, 0x4c, + 0xad, 0xeb, 0x75, 0x50, 0x44, 0xdd, 0x9f, 0x5a, 0x16, 0xaa, 0xd5, 0x43, 0x35, 0xae, 0x20, 0xc8, + 0xa6, 0xa1, 0xba, 0x84, 0xde, 0x22, 0x2d, 0xbb, 0x51, 0xa0, 0x6b, 0x8f, 0x32, 0x08, 0xcd, 0x59, + 0x9d, 0x3f, 0xaa, 0x6b, 0xd3, 0xc9, 0xca, 0x89, 0x9f, 0xa1, 0x7a, 0xc0, 0xc3, 0xfb, 0x23, 0xc6, + 0x7d, 0xd6, 0xf7, 0xe1, 0xb5, 0x43, 0x90, 0xdd, 0x20, 0x92, 0xdf, 0x20, 0xf2, 0x28, 0x54, 0xe7, + 0x71, 0x4f, 0xc5, 0x3c, 0xf4, 0xec, 0x83, 0x74, 0xd2, 0xae, 0x9f, 0x2d, 0x90, 0x68, 0x89, 0x8b, + 0xbf, 0x47, 0xfb, 0x12, 0x7c, 0x70, 0x94, 0x88, 0xdf, 0x6c, 0xd2, 0x1f, 0xb3, 0x3e, 0xf8, 0xbd, + 0x42, 0x6a, 0xd7, 0xb3, 0xba, 0x4d, 0x57, 0x74, 0x86, 0xc4, 0x3e, 0x6a, 0x06, 0xec, 0xf9, 0x93, + 0x90, 0xcd, 0x12, 0xa9, 0xbe, 0x65, 0x22, 0x38, 0x9d, 0xb4, 0x9b, 0x67, 0x25, 0x16, 0x5d, 0x62, + 0xe3, 0x17, 0x06, 0x6a, 0x25, 0xe1, 0x00, 0x98, 0xaf, 0x06, 0xe3, 0x0b, 0xe1, 0x4e, 0x9f, 0x8d, + 0x0b, 0xdd, 0xa1, 0xc3, 0xdd, 0x9b, 0xc6, 0xad, 0x1b, 0xf6, 0xbd, 0x74, 0xd2, 0x6e, 0x3d, 0xd9, + 0xe8, 0xf5, 0xcf, 0xa4, 0x6d, 0x6e, 0xde, 0xbd, 0x1c, 0x47, 0x40, 0xff, 0x23, 0x46, 0xe7, 0xb7, + 0x1a, 0x3a, 0xda, 0x38, 0xd8, 0xf8, 0x5b, 0x84, 0x45, 0x5f, 0x42, 0x3c, 0x02, 0xf7, 0x61, 0xfe, + 0xcc, 0x71, 0x11, 0xea, 0xde, 0x56, 0xed, 0x56, 0x31, 0x23, 0xf8, 0x7c, 
0xc5, 0x83, 0xae, 0x51, + 0xe1, 0x9f, 0x0d, 0xd4, 0x70, 0xf3, 0x30, 0xe0, 0x5e, 0x08, 0x77, 0x3a, 0x9b, 0x0f, 0xdf, 0xe6, + 0xca, 0x91, 0xee, 0x22, 0xe9, 0x34, 0x54, 0xf1, 0xd8, 0xfe, 0xb0, 0x38, 0x50, 0xa3, 0xb4, 0x47, + 0xcb, 0x41, 0xb3, 0x94, 0xdc, 0x19, 0x52, 0xde, 0xf7, 0x7d, 0xf1, 0x13, 0xb8, 0xba, 0xcb, 0xb5, + 0x79, 0x4a, 0xdd, 0x15, 0x0f, 0xba, 0x46, 0x85, 0xbf, 0x42, 0x4d, 0x27, 0x89, 0x63, 0x08, 0xd5, + 0x37, 0x79, 0x7d, 0x75, 0xcb, 0x6a, 0xf6, 0x47, 0x05, 0xa7, 0xf9, 0xa0, 0xb4, 0x4b, 0x97, 0xbc, + 0x33, 0xbd, 0x0b, 0x92, 0xc7, 0xe0, 0x4e, 0xf5, 0xb5, 0xb2, 0xbe, 0x5b, 0xda, 0xa5, 0x4b, 0xde, + 0xf8, 0x04, 0xd5, 0xe1, 0x79, 0x04, 0xce, 0xb4, 0xa0, 0x7b, 0x5a, 0xfd, 0x41, 0xa1, 0xae, 0x9f, + 0x2e, 0xec, 0xd1, 0x92, 0x27, 0x76, 0x10, 0x72, 0x44, 0xe8, 0xf2, 0xfc, 0x27, 0xe3, 0x1d, 0xdd, + 0x08, 0x6b, 0xbb, 0x8b, 0xf4, 0x60, 0xaa, 0x9b, 0x3f, 0xd8, 0x33, 0x93, 0xa4, 0x0b, 0xd8, 0x96, + 0x8f, 0xf0, 0x6a, 0x9b, 0xf0, 0x01, 0xaa, 0x0e, 0x61, 0xac, 0x87, 0xe8, 0x06, 0xcd, 0x3e, 0xf1, + 0x3d, 0x54, 0x1b, 0x31, 0x3f, 0x81, 0xe2, 0x42, 0x7f, 0xba, 0xdd, 0x39, 0x2e, 0x79, 0x00, 0x34, + 0x17, 0x7e, 0xb9, 0x73, 0x62, 0xd8, 0x5f, 0x5f, 0x5d, 0x9b, 0x95, 0x97, 0xd7, 0x66, 0xe5, 0xd5, + 0xb5, 0x59, 0x79, 0x91, 0x9a, 0xc6, 0x55, 0x6a, 0x1a, 0x2f, 0x53, 0xd3, 0x78, 0x95, 0x9a, 0xc6, + 0x5f, 0xa9, 0x69, 0xfc, 0xf2, 0xb7, 0x59, 0xf9, 0xee, 0x68, 0xe3, 0xdf, 0x9c, 0x7f, 0x03, 0x00, + 0x00, 0xff, 0xff, 0x3c, 0xbe, 0x15, 0xfb, 0x02, 0x09, 0x00, 0x00, } -var fileDescriptor_014060e454a820dc = []byte{ - // 1904 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0xdc, 0xc6, - 0x15, 0x16, 0xbd, 0xba, 0xac, 0x46, 0x17, 0x6b, 0x47, 0x17, 0xd3, 0x4a, 0xb3, 0x74, 0x18, 0xa0, - 0x70, 0xd3, 0x84, 0x1b, 0xcb, 0x8e, 0x6b, 0x34, 0x6d, 0x11, 0x51, 0x2b, 0xd9, 0x0a, 0x2c, 0x6b, - 0x3b, 0x6b, 0x07, 0x6d, 0xe1, 0x16, 0x9d, 0x25, 0x47, 0x2b, 0x46, 0x5c, 0x92, 0xe5, 0x0c, 0x37, - 0xda, 0xb7, 0x3e, 0xf4, 0xa1, 0x8f, 0xfd, 0x03, 0x41, 0x7f, 0x40, 0xd1, 0xa7, 0xfe, 0x88, 0x3a, - 0x40, 0x11, 0xe4, 0x31, 0xe8, 0xc3, 0xa2, 0xde, 0xfe, 0x0b, 0x3f, 0x05, 0x9c, 0x1d, 0x72, 0x79, - 0xdd, 0xb5, 0x0d, 0xd8, 0x6f, 0xe4, 0x9c, 0xef, 0xfb, 0xce, 0xcc, 0x99, 0x33, 0x67, 0x2e, 0x40, - 0xbf, 0xb8, 0x47, 0x35, 0xcb, 0x6d, 0x5c, 0x04, 0x1d, 0xe2, 0x3b, 0x84, 0x11, 0xda, 0xe8, 0x13, - 0xc7, 0x74, 0xfd, 0x86, 0x30, 0x60, 0xcf, 0x6a, 0x78, 0xae, 0x6d, 0x19, 0x83, 0x46, 0xff, 0x56, - 0x87, 0x30, 0x7c, 0xab, 0xd1, 0x25, 0x0e, 0xf1, 0x31, 0x23, 0xa6, 0xe6, 0xf9, 0x2e, 0x73, 0xe1, - 0xf5, 0x31, 0x54, 0xc3, 0x9e, 0xa5, 0x8d, 0xa1, 0x9a, 0x80, 0xee, 0x7e, 0xd4, 0xb5, 0xd8, 0x79, - 0xd0, 0xd1, 0x0c, 0xb7, 0xd7, 0xe8, 0xba, 0x5d, 0xb7, 0xc1, 0x19, 0x9d, 0xe0, 0x8c, 0xff, 0xf1, - 0x1f, 0xfe, 0x35, 0x56, 0xda, 0x55, 0x13, 0x4e, 0x0d, 0xd7, 0x27, 0x8d, 0x7e, 0xce, 0xdb, 0xee, - 0x9d, 0x09, 0xa6, 0x87, 0x8d, 0x73, 0xcb, 0x21, 0xfe, 0xa0, 0xe1, 0x5d, 0x74, 0xc3, 0x06, 0xda, - 0xe8, 0x11, 0x86, 0x8b, 0x58, 0x8d, 0x32, 0x96, 0x1f, 0x38, 0xcc, 0xea, 0x91, 0x1c, 0xe1, 0xee, - 0x2c, 0x02, 0x35, 0xce, 0x49, 0x0f, 0xe7, 0x78, 0xb7, 0xcb, 0x78, 0x01, 0xb3, 0xec, 0x86, 0xe5, - 0x30, 0xca, 0xfc, 0x2c, 0x49, 0xbd, 0x03, 0x36, 0xf6, 0x6d, 0xdb, 0xfd, 0x8a, 0x98, 0x07, 0xed, - 0xe3, 0xa6, 0x6f, 0xf5, 0x89, 0x0f, 0x6f, 0x80, 0x79, 0x07, 0xf7, 0x88, 0x2c, 0xdd, 0x90, 0x6e, - 0x2e, 0xeb, 0xab, 0xcf, 0x86, 0xca, 0xdc, 0x68, 0xa8, 0xcc, 0x3f, 0xc2, 0x3d, 0x82, 0xb8, 0x45, - 0xfd, 0x14, 0xd4, 0x04, 0xeb, 0xc8, 0x26, 0x97, 0x5f, 0xb8, 0x76, 0xd0, 0x23, 0xf0, 0xc7, 0x60, - 0xd1, 0xe4, 0x02, 0x82, 0xb8, 
0x2e, 0x88, 0x8b, 0x63, 0x59, 0x24, 0xac, 0x2a, 0x05, 0x57, 0x05, - 0xf9, 0x81, 0x4b, 0x59, 0x0b, 0xb3, 0x73, 0xb8, 0x07, 0x80, 0x87, 0xd9, 0x79, 0xcb, 0x27, 0x67, - 0xd6, 0xa5, 0xa0, 0x43, 0x41, 0x07, 0xad, 0xd8, 0x82, 0x12, 0x28, 0xf8, 0x21, 0xa8, 0xfa, 0x04, - 0x9b, 0xa7, 0x8e, 0x3d, 0x90, 0xaf, 0xdc, 0x90, 0x6e, 0x56, 0xf5, 0x0d, 0xc1, 0xa8, 0x22, 0xd1, - 0x8e, 0x62, 0x84, 0xfa, 0x5f, 0x09, 0x54, 0x0f, 0xfb, 0x96, 0xc1, 0x2c, 0xd7, 0x81, 0x7f, 0x04, - 0xd5, 0x70, 0xb6, 0x4c, 0xcc, 0x30, 0x77, 0xb6, 0xb2, 0xf7, 0xb1, 0x36, 0xc9, 0xa4, 0x38, 0x78, - 0x9a, 0x77, 0xd1, 0x0d, 0x1b, 0xa8, 0x16, 0xa2, 0xb5, 0xfe, 0x2d, 0xed, 0xb4, 0xf3, 0x25, 0x31, - 0xd8, 0x09, 0x61, 0x78, 0xd2, 0xbd, 0x49, 0x1b, 0x8a, 0x55, 0xa1, 0x0d, 0xd6, 0x4c, 0x62, 0x13, - 0x46, 0x4e, 0xbd, 0xd0, 0x23, 0xe5, 0x3d, 0x5c, 0xd9, 0xbb, 0xfd, 0x72, 0x6e, 0x9a, 0x49, 0xaa, - 0x5e, 0x1b, 0x0d, 0x95, 0xb5, 0x54, 0x13, 0x4a, 0x8b, 0xab, 0x5f, 0x4b, 0x60, 0xe7, 0xa8, 0x7d, - 0xdf, 0x77, 0x03, 0xaf, 0xcd, 0xc2, 0xd9, 0xed, 0x0e, 0x84, 0x09, 0xfe, 0x0c, 0xcc, 0xfb, 0x81, - 0x1d, 0xcd, 0xe5, 0xfb, 0xd1, 0x5c, 0xa2, 0xc0, 0x26, 0x2f, 0x86, 0xca, 0x66, 0x86, 0xf5, 0x78, - 0xe0, 0x11, 0xc4, 0x09, 0xf0, 0x73, 0xb0, 0xe8, 0x63, 0xa7, 0x4b, 0xc2, 0xae, 0x57, 0x6e, 0xae, - 0xec, 0xa9, 0x5a, 0xe9, 0x5a, 0xd3, 0x8e, 0x9b, 0x28, 0x84, 0x4e, 0x66, 0x9c, 0xff, 0x52, 0x24, - 0x14, 0xd4, 0x13, 0xb0, 0xc6, 0xa7, 0xda, 0xf5, 0x19, 0xb7, 0xc0, 0x77, 0x41, 0xa5, 0x67, 0x39, - 0xbc, 0x53, 0x0b, 0xfa, 0x8a, 0x60, 0x55, 0x4e, 0x2c, 0x07, 0x85, 0xed, 0xdc, 0x8c, 0x2f, 0x79, - 0xcc, 0x92, 0x66, 0x7c, 0x89, 0xc2, 0x76, 0xf5, 0x3e, 0x58, 0x12, 0x1e, 0x93, 0x42, 0x95, 0xe9, - 0x42, 0x95, 0x02, 0xa1, 0x7f, 0x5c, 0x01, 0x9b, 0x2d, 0xd7, 0x6c, 0x5a, 0xd4, 0x0f, 0x78, 0xbc, - 0xf4, 0xc0, 0xec, 0x12, 0xf6, 0x16, 0xf2, 0xe3, 0x31, 0x98, 0xa7, 0x1e, 0x31, 0x44, 0x5a, 0xec, - 0x4d, 0x89, 0x6d, 0x41, 0xff, 0xda, 0x1e, 0x31, 0x26, 0xcb, 0x32, 0xfc, 0x43, 0x5c, 0x0d, 0x3e, - 0x05, 0x8b, 0x94, 0x61, 0x16, 0x50, 0xb9, 0xc2, 0x75, 0xef, 0xbc, 0xa2, 0x2e, 0xe7, 0x4e, 0x66, - 0x71, 0xfc, 0x8f, 0x84, 0xa6, 0xfa, 0x1f, 0x09, 0x5c, 0x2b, 0x60, 0x3d, 0xb4, 0x28, 0x83, 0x4f, - 0x73, 0x11, 0xd3, 0x5e, 0x2e, 0x62, 0x21, 0x9b, 0xc7, 0x2b, 0x5e, 0xbc, 0x51, 0x4b, 0x22, 0x5a, - 0x6d, 0xb0, 0x60, 0x31, 0xd2, 0x8b, 0x52, 0x51, 0x7b, 0xb5, 0x61, 0xe9, 0x6b, 0x42, 0x7a, 0xe1, - 0x38, 0x14, 0x41, 0x63, 0x2d, 0xf5, 0xdb, 0x2b, 0x85, 0xc3, 0x09, 0xc3, 0x09, 0xcf, 0xc0, 0x6a, - 0xcf, 0x72, 0xf6, 0xfb, 0xd8, 0xb2, 0x71, 0x47, 0xac, 0x9e, 0x69, 0x49, 0x10, 0x56, 0x58, 0x6d, - 0x5c, 0x61, 0xb5, 0x63, 0x87, 0x9d, 0xfa, 0x6d, 0xe6, 0x5b, 0x4e, 0x57, 0xdf, 0x18, 0x0d, 0x95, - 0xd5, 0x93, 0x84, 0x12, 0x4a, 0xe9, 0xc2, 0xdf, 0x83, 0x2a, 0x25, 0x36, 0x31, 0x98, 0xeb, 0xbf, - 0x5a, 0x85, 0x78, 0x88, 0x3b, 0xc4, 0x6e, 0x0b, 0xaa, 0xbe, 0x1a, 0xc6, 0x2d, 0xfa, 0x43, 0xb1, - 0x24, 0xb4, 0xc1, 0x7a, 0x0f, 0x5f, 0x3e, 0x71, 0x70, 0x3c, 0x90, 0xca, 0x6b, 0x0e, 0x04, 0x8e, - 0x86, 0xca, 0xfa, 0x49, 0x4a, 0x0b, 0x65, 0xb4, 0xd5, 0x7f, 0x2f, 0x80, 0xeb, 0xa5, 0x59, 0x05, - 0x3f, 0x07, 0xd0, 0xed, 0x50, 0xe2, 0xf7, 0x89, 0x79, 0x7f, 0xbc, 0x07, 0x59, 0x6e, 0xb4, 0x70, - 0x77, 0xc5, 0x04, 0xc1, 0xd3, 0x1c, 0x02, 0x15, 0xb0, 0xe0, 0x5f, 0x24, 0xb0, 0x66, 0x8e, 0xdd, - 0x10, 0xb3, 0xe5, 0x9a, 0x51, 0x62, 0xdc, 0x7f, 0x9d, 0x7c, 0xd7, 0x9a, 0x49, 0xa5, 0x43, 0x87, - 0xf9, 0x03, 0x7d, 0x5b, 0x74, 0x68, 0x2d, 0x65, 0x43, 0x69, 0xa7, 0xe1, 0x90, 0xcc, 0x58, 0x92, - 0x8a, 0x3d, 0x8d, 0x87, 0x78, 0x61, 0x32, 0xa4, 0x66, 0x0e, 0x81, 0x0a, 0x58, 0xf0, 0x57, 0x60, - 0xdd, 0x08, 0x7c, 0x9f, 0x38, 0xec, 0x01, 0xc1, 0x36, 
0x3b, 0x1f, 0xc8, 0xf3, 0x5c, 0x67, 0x47, - 0xe8, 0xac, 0x1f, 0xa4, 0xac, 0x28, 0x83, 0x0e, 0xf9, 0x26, 0xa1, 0x96, 0x4f, 0xcc, 0x88, 0xbf, - 0x90, 0xe6, 0x37, 0x53, 0x56, 0x94, 0x41, 0xc3, 0x7b, 0x60, 0x95, 0x5c, 0x7a, 0xc4, 0x88, 0x02, - 0xba, 0xc8, 0xd9, 0x5b, 0x82, 0xbd, 0x7a, 0x98, 0xb0, 0xa1, 0x14, 0x12, 0x1a, 0x00, 0x18, 0xae, - 0x63, 0x5a, 0xe3, 0x7d, 0x6e, 0x89, 0x4f, 0x44, 0xe3, 0xe5, 0xb2, 0xf8, 0x20, 0xe2, 0x4d, 0xaa, - 0x65, 0xdc, 0x44, 0x51, 0x42, 0x76, 0xd7, 0x06, 0x30, 0x3f, 0x4d, 0x70, 0x03, 0x54, 0x2e, 0xc8, - 0x60, 0xbc, 0xb7, 0xa1, 0xf0, 0x13, 0x7e, 0x06, 0x16, 0xfa, 0xd8, 0x0e, 0x88, 0x58, 0x4d, 0x1f, - 0xbc, 0x5c, 0x3f, 0x1e, 0x5b, 0x3d, 0x82, 0xc6, 0xc4, 0x9f, 0x5f, 0xb9, 0x27, 0xa9, 0xdf, 0x48, - 0xa0, 0xd6, 0x72, 0xcd, 0x36, 0x31, 0x02, 0xdf, 0x62, 0x83, 0x16, 0xcf, 0xa4, 0xb7, 0xb0, 0x2b, - 0xa0, 0xd4, 0xae, 0xf0, 0xf1, 0xf4, 0x6c, 0x4e, 0xf7, 0xae, 0x6c, 0x4f, 0x50, 0x9f, 0x49, 0x60, - 0x3b, 0x87, 0x7e, 0x0b, 0x35, 0xfb, 0xd7, 0xe9, 0x9a, 0xfd, 0xe1, 0xab, 0x0c, 0xa6, 0xa4, 0x62, - 0x7f, 0x53, 0x2b, 0x18, 0x0a, 0xaf, 0xd7, 0xe1, 0xf9, 0xd1, 0xb7, 0xfa, 0x96, 0x4d, 0xba, 0xc4, - 0xe4, 0x83, 0xa9, 0x26, 0xce, 0x8f, 0xb1, 0x05, 0x25, 0x50, 0x90, 0x82, 0x1d, 0x93, 0x9c, 0xe1, - 0xc0, 0x66, 0xfb, 0xa6, 0x79, 0x80, 0x3d, 0xdc, 0xb1, 0x6c, 0x8b, 0x59, 0xe2, 0xc0, 0xb3, 0xac, - 0x7f, 0x3a, 0x1a, 0x2a, 0x3b, 0xcd, 0x42, 0xc4, 0x8b, 0xa1, 0xf2, 0x6e, 0xfe, 0xbe, 0xa0, 0xc5, - 0x90, 0x01, 0x2a, 0x91, 0x86, 0x03, 0x20, 0xfb, 0xe4, 0x4f, 0x41, 0xb8, 0xf2, 0x9a, 0xbe, 0xeb, - 0xa5, 0xdc, 0x56, 0xb8, 0xdb, 0x5f, 0x8e, 0x86, 0x8a, 0x8c, 0x4a, 0x30, 0xb3, 0x1d, 0x97, 0xca, - 0xc3, 0x2f, 0xc1, 0x26, 0x16, 0x27, 0xfd, 0xa4, 0xd7, 0x79, 0xee, 0xf5, 0xde, 0x68, 0xa8, 0x6c, - 0xee, 0xe7, 0xcd, 0xb3, 0x1d, 0x16, 0x89, 0xc2, 0x06, 0x58, 0xea, 0xf3, 0x4b, 0x01, 0x95, 0x17, - 0xb8, 0xfe, 0xf6, 0x68, 0xa8, 0x2c, 0x8d, 0xef, 0x09, 0xa1, 0xe6, 0xe2, 0x51, 0x9b, 0x1f, 0x35, - 0x23, 0x14, 0xfc, 0x04, 0xac, 0x9c, 0xbb, 0x94, 0x3d, 0x22, 0xec, 0x2b, 0xd7, 0xbf, 0xe0, 0xd5, - 0xa7, 0xaa, 0x6f, 0x8a, 0x19, 0x5c, 0x79, 0x30, 0x31, 0xa1, 0x24, 0x0e, 0xfe, 0x16, 0x2c, 0x9f, - 0x8b, 0x83, 0x65, 0x54, 0x7a, 0x6e, 0x4e, 0x49, 0xb4, 0xd4, 0x21, 0x54, 0xaf, 0x09, 0xf9, 0xe5, - 0xa8, 0x99, 0xa2, 0x89, 0x1a, 0xfc, 0x09, 0x58, 0xe2, 0x3f, 0xc7, 0x4d, 0xb9, 0xca, 0x7b, 0x73, - 0x55, 0xc0, 0x97, 0x1e, 0x8c, 0x9b, 0x51, 0x64, 0x8f, 0xa0, 0xc7, 0xad, 0x03, 0x79, 0x39, 0x0f, - 0x3d, 0x6e, 0x1d, 0xa0, 0xc8, 0x0e, 0x9f, 0x82, 0x25, 0x4a, 0x1e, 0x5a, 0x4e, 0x70, 0x29, 0x03, - 0xbe, 0xe4, 0x6e, 0x4d, 0xe9, 0x6e, 0xfb, 0x90, 0x23, 0x33, 0x47, 0xfa, 0x89, 0xba, 0xb0, 0xa3, - 0x48, 0x12, 0x9a, 0x60, 0xd9, 0x0f, 0x9c, 0x7d, 0xfa, 0x84, 0x12, 0x5f, 0x5e, 0xc9, 0x9d, 0x27, - 0xb2, 0xfa, 0x28, 0xc2, 0x66, 0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x22, 0x0c, 0x4d, 0x00, 0xf8, - 0x0f, 0xbf, 0x39, 0xc8, 0x3b, 0x33, 0x4f, 0x9a, 0x28, 0x06, 0x67, 0xfd, 0xac, 0x87, 0xcb, 0x73, - 0x62, 0x46, 0x09, 0x5d, 0xf8, 0x57, 0x09, 0x40, 0x1a, 0x78, 0x9e, 0x4d, 0x7a, 0xc4, 0x61, 0xd8, - 0xe6, 0xad, 0x54, 0x5e, 0xe5, 0xee, 0x7e, 0x31, 0x2d, 0x6a, 0x39, 0x52, 0xd6, 0x6d, 0xbc, 0x37, - 0xe7, 0xa1, 0xa8, 0xc0, 0x67, 0x38, 0x69, 0x67, 0x62, 0xb4, 0x6b, 0x33, 0x27, 0xad, 0xf8, 0x1e, - 0x36, 0x99, 0x34, 0x61, 0x47, 0x91, 0x24, 0xfc, 0x02, 0xec, 0x44, 0xb7, 0x54, 0xe4, 0xba, 0xec, - 0xc8, 0xb2, 0x09, 0x1d, 0x50, 0x46, 0x7a, 0xf2, 0x3a, 0x4f, 0xa6, 0xba, 0x60, 0xee, 0xa0, 0x42, - 0x14, 0x2a, 0x61, 0xc3, 0x1e, 0x50, 0xa2, 0x22, 0x14, 0xae, 0xd0, 0xb8, 0x0a, 0x1e, 0x52, 0x03, - 0xdb, 0xe3, 0xd3, 0xd7, 0x55, 0xee, 0xe0, 0xfd, 0xd1, 0x50, 0x51, 0x9a, 0xd3, 
0xa1, 0x68, 0x96, - 0x16, 0xfc, 0x0d, 0x90, 0x71, 0x99, 0x9f, 0x0d, 0xee, 0xe7, 0x47, 0x61, 0x65, 0x2b, 0x75, 0x50, - 0xca, 0x86, 0x1e, 0xd8, 0xc0, 0xe9, 0xf7, 0x02, 0x2a, 0xd7, 0xf8, 0x5a, 0xff, 0x60, 0xca, 0x3c, - 0x64, 0x9e, 0x18, 0x74, 0x59, 0x84, 0x71, 0x23, 0x63, 0xa0, 0x28, 0xa7, 0x0e, 0x2f, 0x01, 0xc4, - 0xd9, 0xe7, 0x0d, 0x2a, 0xc3, 0x99, 0x1b, 0x59, 0xee, 0x4d, 0x64, 0x92, 0x6a, 0x39, 0x13, 0x45, - 0x05, 0x3e, 0x20, 0x03, 0x35, 0x9c, 0x79, 0x8e, 0xa1, 0xf2, 0x35, 0xee, 0xf8, 0xa7, 0xb3, 0x1d, - 0xc7, 0x1c, 0xfd, 0xba, 0xf0, 0x5b, 0xcb, 0x5a, 0x28, 0xca, 0x3b, 0x80, 0x0f, 0xc1, 0x96, 0x68, - 0x7c, 0xe2, 0x50, 0x7c, 0x46, 0xda, 0x03, 0x6a, 0x30, 0x9b, 0xca, 0x9b, 0xbc, 0x76, 0xcb, 0xa3, - 0xa1, 0xb2, 0xb5, 0x5f, 0x60, 0x47, 0x85, 0x2c, 0xf8, 0x19, 0xd8, 0x38, 0x73, 0xfd, 0x8e, 0x65, - 0x9a, 0xc4, 0x89, 0x94, 0xb6, 0xb8, 0xd2, 0x56, 0x18, 0xff, 0xa3, 0x8c, 0x0d, 0xe5, 0xd0, 0x90, - 0x82, 0x6d, 0xa1, 0xdc, 0xf2, 0x5d, 0xe3, 0xc4, 0x0d, 0x1c, 0x16, 0x6e, 0x17, 0x54, 0xde, 0x8e, - 0xb7, 0xc8, 0xed, 0xfd, 0x22, 0xc0, 0x8b, 0xa1, 0x72, 0xa3, 0x60, 0xbb, 0x4a, 0x81, 0x50, 0xb1, - 0x36, 0xb4, 0xc1, 0xaa, 0x78, 0x60, 0x3b, 0xb0, 0x31, 0xa5, 0xb2, 0xcc, 0x97, 0xfa, 0xdd, 0xe9, - 0x85, 0x2d, 0x86, 0x67, 0xd7, 0x3b, 0xbf, 0xf9, 0x25, 0x01, 0x28, 0xa5, 0xae, 0xfe, 0x5d, 0x02, - 0xd7, 0x4b, 0x0b, 0x23, 0xbc, 0x9b, 0x7a, 0xb5, 0x51, 0x33, 0xaf, 0x36, 0x30, 0x4f, 0x7c, 0x03, - 0x8f, 0x36, 0x5f, 0x4b, 0x40, 0x2e, 0xdb, 0x21, 0xe0, 0x27, 0xa9, 0x0e, 0xbe, 0x97, 0xe9, 0x60, - 0x2d, 0xc7, 0x7b, 0x03, 0xfd, 0xfb, 0x56, 0x02, 0xef, 0x4c, 0x99, 0x81, 0xb8, 0x20, 0x11, 0x33, - 0x89, 0x7a, 0x84, 0xc3, 0xa5, 0x2c, 0xf1, 0x3c, 0x9a, 0x14, 0xa4, 0x02, 0x0c, 0x2a, 0x65, 0xc3, - 0x27, 0xe0, 0x9a, 0xa8, 0x86, 0x59, 0x1b, 0x3f, 0xb9, 0x2f, 0xeb, 0xef, 0x8c, 0x86, 0xca, 0xb5, - 0x66, 0x31, 0x04, 0x95, 0x71, 0xd5, 0x7f, 0x4a, 0x60, 0xa7, 0x78, 0xcb, 0x87, 0xb7, 0x53, 0xe1, - 0x56, 0x32, 0xe1, 0xbe, 0x9a, 0x61, 0x89, 0x60, 0xff, 0x01, 0xac, 0x8b, 0x83, 0x41, 0xfa, 0x11, - 0x32, 0x15, 0xf4, 0x70, 0x89, 0x84, 0x67, 0x7a, 0x21, 0x11, 0xa5, 0x2f, 0xbf, 0xef, 0xa7, 0xdb, - 0x50, 0x46, 0x4d, 0xfd, 0x97, 0x04, 0xde, 0x9b, 0xb9, 0xd9, 0x42, 0x3d, 0xd5, 0x75, 0x2d, 0xd3, - 0xf5, 0x7a, 0xb9, 0xc0, 0x9b, 0x79, 0x8b, 0xd4, 0x3f, 0x7a, 0xf6, 0xbc, 0x3e, 0xf7, 0xdd, 0xf3, - 0xfa, 0xdc, 0xf7, 0xcf, 0xeb, 0x73, 0x7f, 0x1e, 0xd5, 0xa5, 0x67, 0xa3, 0xba, 0xf4, 0xdd, 0xa8, - 0x2e, 0x7d, 0x3f, 0xaa, 0x4b, 0xff, 0x1b, 0xd5, 0xa5, 0xbf, 0xfd, 0xbf, 0x3e, 0xf7, 0xbb, 0x25, - 0x21, 0xf7, 0x43, 0x00, 0x00, 0x00, 0xff, 0xff, 0xde, 0x4e, 0x7c, 0x8c, 0xa6, 0x18, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { +func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -741,25 +267,42 @@ func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { +func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { +func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -769,221 +312,12 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { +func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Eviction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DeleteOptions != nil { - { - size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - 
copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *IDRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1088,6 +422,13 @@ func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.UnhealthyPodEvictionPolicy != nil { + i -= len(*m.UnhealthyPodEvictionPolicy) + copy(dAtA[i:], *m.UnhealthyPodEvictionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UnhealthyPodEvictionPolicy))) + i-- + dAtA[i] = 0x22 + } if m.MaxUnavailable != nil { { size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) @@ -1208,3408 +549,247 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA[offset] = uint8(v) + return base } - -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - 
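An editorial sketch of the wire-format arithmetic behind the generated marshalling code in this hunk: each field is preceded by a tag byte of (field number << 3) | wire type, which is why the new unhealthyPodEvictionPolicy field (number 4, length-delimited) writes 0x22 above, and lengths are written as base-128 varints by encodeVarintGenerated. The helper names below are assumptions for illustration, not part of the vendored code.

package example

import "fmt"

// tagByte computes the key byte written before a field; unhealthyPodEvictionPolicy
// is field 4 with length-delimited wire type 2, so tagByte(4, 2) == 0x22.
func tagByte(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

// putUvarint appends v as a base-128 varint, least-significant 7 bits first,
// mirroring what encodeVarintGenerated writes; 300 encodes to 0xAC 0x02.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func demo() {
	fmt.Printf("%#x\n", tagByte(4, 2))        // 0x22
	fmt.Printf("% x\n", putUvarint(nil, 300)) // ac 02
}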
{ - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 + if m.MinAvailable != nil { + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.RunAsGroup != nil { - { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 
+ l + sovGenerated(uint64(l)) } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } + if m.UnhealthyPodEvictionPolicy != nil { + l = len(*m.UnhealthyPodEvictionPolicy) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 + n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Eviction) String() string { + if this == nil { + return "nil" } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] 
= 0 - } - i-- - dAtA[i] = 0x78 + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" } - i-- - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," } - i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + repeatedStringForConditions += "}" + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) } - i-- - dAtA[i] = 0x52 - i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } - i-- - dAtA[i] = 0x48 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + mapStringForDisruptedPods += "}" + s := 
strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } 
- } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllowedCSIDriver) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedFlexVolume) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedHostPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Eviction) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DeleteOptions != nil { - l = m.DeleteOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } - -func (m *FSGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IDRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *PodDisruptionBudget) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodDisruptionBudgetList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodDisruptionBudgetSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MinAvailable != nil { - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - for k, v := range m.DisruptedPods { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) - n += 1 + sovGenerated(uint64(m.CurrentHealthy)) - n += 1 + sovGenerated(uint64(m.DesiredHealthy)) - n += 1 + 
sovGenerated(uint64(m.ExpectedPods)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 3 - } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - 
return n -} - -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Eviction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), 
"ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodDisruptionBudget{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) - for k := range this.DisruptedPods { - keysForDisruptedPods = append(keysForDisruptedPods, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]v1.Time{" - for _, k := range keysForDisruptedPods { - mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) - } - mapStringForDisruptedPods += "}" - s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, - `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, - `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, - `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 
1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + 
strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Eviction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Eviction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteOptions == nil { - m.DeleteOptions = &v1.DeleteOptions{} - } - if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IDRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MinAvailable == nil { - m.MinAvailable = &intstr.IntOrString{} - } - if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]v1.Time) - } - var mapkey string - mapvalue := &v1.Time{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return 
ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.DisruptedPods[mapkey] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) - } - m.DisruptionsAllowed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DisruptionsAllowed |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) - } - m.CurrentHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) - } - m.DesiredHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) - } - m.ExpectedPods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpectedPods |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HostIPC = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } +func (m *Eviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4636,14 +816,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 18: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4670,110 +849,66 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 22: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4800,16 +935,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 23: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4836,14 +968,13 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 24: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4870,10 +1001,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -4898,7 +1026,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4921,17 +1049,17 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4941,27 +1069,28 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4988,8 +1117,8 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = 
append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -5014,7 +1143,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5037,17 +1166,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5057,27 +1186,31 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + if m.MinAvailable == nil { + m.MinAvailable = &intstr.IntOrString{} + } + if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5104,66 +1237,18 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5173,27 +1258,31 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5221,8 +1310,8 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) + m.UnhealthyPodEvictionPolicy = &s iNdEx = postIndex default: iNdEx = preIndex @@ -5245,7 +1334,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5268,17 +1357,17 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) } - var stringLen uint64 + m.ObservedGeneration = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5288,27 +1377,14 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5335,68 +1411,149 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]v1.Time) } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := &v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - 
var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.DisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DisruptionsAllowed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentHealthy |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) } - var stringLen uint64 + m.DesiredHealthy = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5406,27 +1563,33 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.DesiredHealthy |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedPods |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5453,8 +1616,8 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index 8a2824b51..91e33f233 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -21,43 +21,13 @@ syntax = "proto2"; 
package k8s.io.api.policy.v1beta1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} +option go_package = "k8s.io/api/policy/v1beta1"; // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is @@ -65,42 +35,11 @@ message AllowedHostPath { message Eviction { // ObjectMeta describes the pod that is being evicted. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; -} - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -108,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -124,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual PodDisruptionBudget objects repeated PodDisruptionBudget items = 2; @@ -137,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -145,14 +84,42 @@ message PodDisruptionBudgetSpec { // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). 
+ // +optional + optional string unhealthyPodEvictionPolicy = 4; } // PodDisruptionBudgetStatus represents information about the status of a @@ -175,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. // +optional - map<string, k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2; + map<string, .k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2; // Number of pod disruptions that are currently allowed. optional int32 disruptionsAllowed = 3; @@ -207,223 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; -} - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated in 1.21. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. 
- // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. 
"foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. 
- // +optional - repeated IDRange ranges = 2; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go index b3efd6326..d77f13040 100644 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -46,8 +46,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, &Eviction{}, ) // Add the watch version that applies diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 486f93461..bc5f970d2 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -45,8 +44,56 @@ type PodDisruptionBudgetSpec struct { // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` + + // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods + // should be considered for eviction. Current implementation considers healthy pods, + // as pods that have status.conditions item with type="Ready",status="True". + // + // Valid policies are IfHealthyBudget and AlwaysAllow. + // If no policy is specified, the default behavior will be used, + // which corresponds to the IfHealthyBudget policy. + // + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. + // + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + // + // Additional policies may be added in the future. + // Clients making eviction decisions should disallow eviction of unhealthy pods + // if they encounter an unrecognized policy in this field. + // + // This field is beta-level. The eviction API uses this field when + // the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default). + // +optional + UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty" protobuf:"bytes,4,opt,name=unhealthyPodEvictionPolicy"` } +// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods +// should be considered for eviction. +// +enum +type UnhealthyPodEvictionPolicyType string + +const ( + // IfHealthyBudget policy means that running pods (status.phase="Running"), + // but not yet healthy can be evicted only if the guarded application is not + // disrupted (status.currentHealthy is at least equal to status.desiredHealthy). + // Healthy pods will be subject to the PDB for eviction. 
+ IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget" + + // AlwaysAllow policy means that all running pods (status.phase="Running"), + // but not yet healthy are considered disrupted and can be evicted regardless + // of whether the criteria in a PDB is met. This means perspective running + // pods of a disrupted application might not get a chance to become healthy. + // Healthy pods will be subject to the PDB for eviction. + AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow" +) + // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { @@ -180,374 +227,3 @@ type Eviction struct { // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.10 -// +k8s:prerelease-lifecycle-gen:deprecated=1.21 -// +k8s:prerelease-lifecycle-gen:removed=1.25 - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated in 1.21. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is an allowlist of volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. 
- // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is an allowlist of host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. 
- // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities -// field and means that any capabilities are allowed to be requested. 
-var AllowAllCapabilities v1.Capability = "*" - -// FSType gives strong typing to different file systems that are used by volumes. -type FSType string - -const ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - VsphereVolume FSType = "vsphereVolume" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - PhotonPersistentDisk FSType = "photonPersistentDisk" - StorageOS FSType = "storageos" - Projected FSType = "projected" - PortworxVolume FSType = "portworxVolume" - ScaleIO FSType = "scaleIO" - CSI FSType = "csi" - Ephemeral FSType = "ephemeral" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. 
If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsUserStrategyRunAsAny means that container may make requests for any gid. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. - // However, when FSGroups are specified, they have to fall in the defined range. - FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. 
- FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when gids are specified, they have to fall in the defined range. - SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.10 -// +k8s:prerelease-lifecycle-gen:deprecated=1.21 -// +k8s:prerelease-lifecycle-gen:removed=1.25 - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. 
- Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index ef81d43af..4a79d7594 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -24,37 +24,9 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - var map_Eviction = map[string]string{ "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.", "metadata": "ObjectMeta describes the pod that is being evicted.", @@ -65,36 +37,6 @@ func (Eviction) SwaggerDoc() map[string]string { return map_Eviction } -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. 
It requires both the start and end to be defined.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - var map_PodDisruptionBudget = map[string]string{ "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -117,10 +59,11 @@ func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { } var map_PodDisruptionBudgetSpec = map[string]string{ - "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", - "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", - "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", - "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget. A null selector selects no pods. An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. In policy/v1, an empty selector will select all pods in the namespace.", + "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "unhealthyPodEvictionPolicy": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. 
If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB is met. This means perspective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", } func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { @@ -142,106 +85,4 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { return map_PodDisruptionBudgetStatus } -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is a beta field, and is only honored if the API server enables the CSIInlineVolume feature gate.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. 
Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 0a6239b87..51895ffdb 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -22,60 +22,11 @@ limitations under the License. package v1beta1 import ( - corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in @@ -107,59 +58,6 @@ func (in *Eviction) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. 
-func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { *out = *in @@ -239,6 +137,11 @@ func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) { *out = new(intstr.IntOrString) **out = **in } + if in.UnhealthyPodEvictionPolicy != nil { + in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy + *out = new(UnhealthyPodEvictionPolicyType) + **out = **in + } return } @@ -281,268 +184,3 @@ func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. 
-func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
-func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go index 612061d6c..765a71e47 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go @@ -90,39 +90,3 @@ func (in *PodDisruptionBudgetList) APILifecycleReplacement() schema.GroupVersion func (in *PodDisruptionBudgetList) APILifecycleRemoved() (major, minor int) { return 1, 25 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { - return 1, 10 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { - return 1, 21 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { - return 1, 10 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { - return 1, 21 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { - return 1, 25 -} diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 80f43ce92..b0e4e5b5b 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=rbac.authorization.k8s.io package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 678c00512..112d18fb0 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto +// source: k8s.io/api/rbac/v1/generated.proto package v1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{0} + return fileDescriptor_c8ba2e7dd472de66, []int{0} } func (m *AggregationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_AggregationRule proto.InternalMessageInfo func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{1} + return fileDescriptor_c8ba2e7dd472de66, []int{1} } func (m *ClusterRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_ClusterRole proto.InternalMessageInfo func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{2} + return fileDescriptor_c8ba2e7dd472de66, []int{2} } func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{3} + return fileDescriptor_c8ba2e7dd472de66, []int{3} } func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{4} + return fileDescriptor_c8ba2e7dd472de66, []int{4} } func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{5} + return fileDescriptor_c8ba2e7dd472de66, []int{5} } func (m *PolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_PolicyRule proto.InternalMessageInfo func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{6} + return fileDescriptor_c8ba2e7dd472de66, []int{6} } func (m *Role) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +243,7 @@ var xxx_messageInfo_Role proto.InternalMessageInfo func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{7} + return fileDescriptor_c8ba2e7dd472de66, []int{7} } func (m *RoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +271,7 @@ var xxx_messageInfo_RoleBinding proto.InternalMessageInfo func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{8} + return fileDescriptor_c8ba2e7dd472de66, []int{8} } func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +299,7 @@ var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{9} + return fileDescriptor_c8ba2e7dd472de66, []int{9} } func (m *RoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +327,7 @@ var xxx_messageInfo_RoleList proto.InternalMessageInfo func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{10} + return fileDescriptor_c8ba2e7dd472de66, []int{10} } func (m *RoleRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +355,7 @@ var xxx_messageInfo_RoleRef proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{11} + return fileDescriptor_c8ba2e7dd472de66, []int{11} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,62 +396,61 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) -} - -var fileDescriptor_979ffd7b30c07419 = []byte{ - // 807 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, - 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, - 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, - 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, - 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, - 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, - 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, - 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, - 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, - 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, - 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, - 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 
0xaa, - 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, - 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, - 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, - 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, - 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, - 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, - 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, - 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, - 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, - 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, - 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, - 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, - 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, - 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, - 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, - 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, - 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, - 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, - 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, - 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, - 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, - 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, - 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, - 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, - 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, - 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, - 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, - 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, - 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, - 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, - 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, - 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, - 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, - 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, - 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, - 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, - 0x05, 0x3c, 0x78, 
0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, - 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/rbac/v1/generated.proto", fileDescriptor_c8ba2e7dd472de66) +} + +var fileDescriptor_c8ba2e7dd472de66 = []byte{ + // 790 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x4d, 0x6f, 0xd3, 0x4a, + 0x14, 0xcd, 0xa4, 0x89, 0x1a, 0x4f, 0x5e, 0x94, 0xd7, 0x79, 0xd5, 0x93, 0xd5, 0xf7, 0xe4, 0x54, + 0x46, 0x42, 0x95, 0x00, 0x9b, 0x16, 0x04, 0xdd, 0x74, 0x51, 0x17, 0x81, 0xaa, 0x96, 0x52, 0x4d, + 0x05, 0x0b, 0xc4, 0x82, 0x89, 0x33, 0x75, 0x87, 0xf8, 0x4b, 0x1e, 0x3b, 0x52, 0xc5, 0x06, 0x21, + 0xb1, 0x60, 0xc7, 0x12, 0x7e, 0x01, 0x1b, 0x58, 0xf2, 0x0b, 0xd8, 0x74, 0xd9, 0x65, 0x57, 0x11, + 0x35, 0x3f, 0x04, 0xe4, 0xaf, 0x38, 0x1f, 0x2e, 0xcd, 0x2a, 0x12, 0x62, 0x95, 0xcc, 0xbd, 0xe7, + 0x9e, 0x7b, 0xe6, 0xd8, 0xf7, 0x26, 0x50, 0xee, 0xae, 0x73, 0x85, 0x39, 0x2a, 0x71, 0x99, 0xea, + 0xb5, 0x89, 0xae, 0xf6, 0x56, 0x55, 0x83, 0xda, 0xd4, 0x23, 0x3e, 0xed, 0x28, 0xae, 0xe7, 0xf8, + 0x0e, 0x42, 0x09, 0x46, 0x21, 0x2e, 0x53, 0x22, 0x8c, 0xd2, 0x5b, 0x5d, 0xba, 0x61, 0x30, 0xff, + 0x28, 0x68, 0x2b, 0xba, 0x63, 0xa9, 0x86, 0x63, 0x38, 0x6a, 0x0c, 0x6d, 0x07, 0x87, 0xf1, 0x29, + 0x3e, 0xc4, 0xdf, 0x12, 0x8a, 0xa5, 0xdb, 0x79, 0x1b, 0x8b, 0xe8, 0x47, 0xcc, 0xa6, 0xde, 0xb1, + 0xea, 0x76, 0x8d, 0x28, 0xc0, 0x55, 0x8b, 0xfa, 0xa4, 0xa0, 0xf1, 0x92, 0x7a, 0x51, 0x95, 0x17, + 0xd8, 0x3e, 0xb3, 0xe8, 0x44, 0xc1, 0x9d, 0xcb, 0x0a, 0xb8, 0x7e, 0x44, 0x2d, 0x32, 0x5e, 0x27, + 0x7f, 0x00, 0xb0, 0xb9, 0x69, 0x18, 0x1e, 0x35, 0x88, 0xcf, 0x1c, 0x1b, 0x07, 0x26, 0x45, 0x6f, + 0x00, 0x5c, 0xd4, 0xcd, 0x80, 0xfb, 0xd4, 0xc3, 0x8e, 0x49, 0x0f, 0xa8, 0x49, 0x75, 0xdf, 0xf1, + 0xb8, 0x08, 0x96, 0xe7, 0x56, 0xea, 0x6b, 0xb7, 0x94, 0xdc, 0x95, 0x41, 0x2f, 0xc5, 0xed, 0x1a, + 0x51, 0x80, 0x2b, 0xd1, 0x95, 0x94, 0xde, 0xaa, 0xb2, 0x4b, 0xda, 0xd4, 0xcc, 0x6a, 0xb5, 0xff, + 0x4f, 0xfa, 0xad, 0x52, 0xd8, 0x6f, 0x2d, 0x6e, 0x15, 0x10, 0xe3, 0xc2, 0x76, 0xf2, 0xfb, 0x32, + 0xac, 0x0f, 0xc1, 0xd1, 0x73, 0x58, 0x8b, 0xc8, 0x3b, 0xc4, 0x27, 0x22, 0x58, 0x06, 0x2b, 0xf5, + 0xb5, 0x9b, 0xd3, 0x49, 0x79, 0xd4, 0x7e, 0x41, 0x75, 0xff, 0x21, 0xf5, 0x89, 0x86, 0x52, 0x1d, + 0x30, 0x8f, 0xe1, 0x01, 0x2b, 0xda, 0x82, 0x55, 0x2f, 0x30, 0x29, 0x17, 0xcb, 0xf1, 0x4d, 0x25, + 0x65, 0xf2, 0xf9, 0x2b, 0xfb, 0x8e, 0xc9, 0xf4, 0xe3, 0xc8, 0x28, 0xad, 0x91, 0x92, 0x55, 0xa3, + 0x13, 0xc7, 0x49, 0x2d, 0x6a, 0xc3, 0x26, 0x19, 0x75, 0x54, 0x9c, 0x8b, 0xd5, 0x5e, 0x29, 0xa2, + 0x1b, 0x33, 0x5f, 0xfb, 0x27, 0xec, 0xb7, 0xc6, 0x9f, 0x08, 0x1e, 0x27, 0x94, 0xdf, 0x96, 0x21, + 0x1a, 0xb2, 0x46, 0x63, 0x76, 0x87, 0xd9, 0xc6, 0x0c, 0x1c, 0xda, 0x86, 0x35, 0x1e, 0xc4, 0x89, + 0xcc, 0xa4, 0xff, 0x8a, 0x6e, 0x75, 0x90, 0x60, 0xb4, 0xbf, 0x53, 0xb2, 0x5a, 0x1a, 0xe0, 0x78, + 0x50, 0x8e, 0xee, 0xc3, 0x79, 0xcf, 0x31, 0x29, 0xa6, 0x87, 0xa9, 0x3f, 0x85, 0x4c, 0x38, 0x81, + 0x68, 0xcd, 0x94, 0x69, 0x3e, 0x0d, 0xe0, 0xac, 0x58, 0xfe, 0x0a, 0xe0, 0xbf, 0x93, 0x5e, 0xec, + 0x32, 0xee, 0xa3, 0x67, 0x13, 0x7e, 0x28, 0x53, 0xbe, 0xbc, 0x8c, 0x27, 0x6e, 0x0c, 0x2e, 0x90, + 0x45, 0x86, 0xbc, 0xd8, 0x81, 0x55, 0xe6, 0x53, 0x2b, 0x33, 0xe2, 0x6a, 0x91, 0xfc, 0x49, 0x61, + 0xf9, 0x5b, 0xb3, 0x1d, 0x15, 0xe3, 0x84, 0x43, 0xfe, 0x02, 0x60, 0x73, 0x08, 0x3c, 0x03, 0xf9, + 0xf7, 0x46, 0xe5, 0xb7, 0x2e, 0x93, 0x5f, 0xac, 0xfb, 0x07, 0x80, 0x30, 0x1f, 0x09, 0xd4, 0x82, + 0xd5, 0x1e, 0xf5, 0xda, 0xc9, 0xae, 0x10, 0x34, 0x21, 0xc2, 0x3f, 0x89, 0x02, 0x38, 0x89, 0xa3, + 
0x6b, 0x50, 0x20, 0x2e, 0x7b, 0xe0, 0x39, 0x81, 0x9b, 0x74, 0x16, 0xb4, 0x46, 0xd8, 0x6f, 0x09, + 0x9b, 0xfb, 0xdb, 0x49, 0x10, 0xe7, 0xf9, 0x08, 0xec, 0x51, 0xee, 0x04, 0x9e, 0x4e, 0xb9, 0x38, + 0x97, 0x83, 0x71, 0x16, 0xc4, 0x79, 0x1e, 0xdd, 0x85, 0x8d, 0xec, 0xb0, 0x47, 0x2c, 0xca, 0xc5, + 0x4a, 0x5c, 0xb0, 0x10, 0xf6, 0x5b, 0x0d, 0x3c, 0x9c, 0xc0, 0xa3, 0x38, 0xb4, 0x01, 0x9b, 0xb6, + 0x63, 0x67, 0x90, 0xc7, 0x78, 0x97, 0x8b, 0xd5, 0xb8, 0x34, 0x9e, 0xc5, 0xbd, 0xd1, 0x14, 0x1e, + 0xc7, 0xca, 0x9f, 0x01, 0xac, 0xfc, 0x46, 0xfb, 0x49, 0x7e, 0x5d, 0x86, 0xf5, 0x3f, 0x7e, 0x69, + 0x44, 0xe3, 0x36, 0xdb, 0x6d, 0x31, 0xcd, 0xb8, 0x5d, 0xbe, 0x26, 0x3e, 0x02, 0x58, 0x9b, 0xd1, + 0x7e, 0xd8, 0x18, 0x15, 0x2c, 0x5e, 0x28, 0xb8, 0x58, 0xe9, 0x4b, 0x98, 0xb9, 0x8e, 0xae, 0xc3, + 0x5a, 0x36, 0xd3, 0xb1, 0x4e, 0x21, 0xef, 0x9b, 0x8d, 0x3d, 0x1e, 0x20, 0xd0, 0x32, 0xac, 0x74, + 0x99, 0xdd, 0x11, 0xcb, 0x31, 0xf2, 0xaf, 0x14, 0x59, 0xd9, 0x61, 0x76, 0x07, 0xc7, 0x99, 0x08, + 0x61, 0x13, 0x2b, 0xf9, 0x59, 0x1d, 0x42, 0x44, 0xd3, 0x8c, 0xe3, 0x8c, 0xfc, 0x09, 0xc0, 0xf9, + 0xf4, 0xed, 0x19, 0xf0, 0x81, 0x0b, 0xf9, 0x86, 0xf5, 0x95, 0xa7, 0xd1, 0xf7, 0xeb, 0xee, 0x48, + 0x85, 0x42, 0xf4, 0xc9, 0x5d, 0xa2, 0x53, 0xb1, 0x12, 0xc3, 0x16, 0x52, 0x98, 0xb0, 0x97, 0x25, + 0x70, 0x8e, 0xd1, 0xd6, 0x4f, 0xce, 0xa5, 0xd2, 0xe9, 0xb9, 0x54, 0x3a, 0x3b, 0x97, 0x4a, 0xaf, + 0x42, 0x09, 0x9c, 0x84, 0x12, 0x38, 0x0d, 0x25, 0x70, 0x16, 0x4a, 0xe0, 0x5b, 0x28, 0x81, 0x77, + 0xdf, 0xa5, 0xd2, 0x53, 0x34, 0xf9, 0x8f, 0xf5, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xff, + 0x5a, 0x4f, 0xc6, 0x0a, 0x00, 0x00, } func (m *AggregationRule) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 2cf427f12..87b8f832d 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -26,24 +26,26 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/rbac/v1"; // AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole message AggregationRule { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -58,14 +60,16 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. optional RoleRef roleRef = 3; } @@ -73,7 +77,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -83,7 +87,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -93,25 +97,30 @@ message ClusterRoleList { // about who the rule applies to or which namespace the rule applies to. message PolicyRule { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic repeated string resourceNames = 4; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic repeated string nonResourceURLs = 5; } @@ -119,10 +128,11 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic repeated PolicyRule rules = 2; } @@ -132,14 +142,16 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. 
// +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. optional RoleRef roleRef = 3; } @@ -147,7 +159,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -157,7 +169,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 067b6f15e..f9628b853 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -48,23 +48,28 @@ const ( // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` } @@ -79,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. 
If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -101,6 +106,7 @@ type RoleRef struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { @@ -111,11 +117,13 @@ type Role struct { // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given @@ -128,14 +136,17 @@ type RoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { @@ -149,6 +160,7 @@ type RoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleList is a collection of Roles type RoleList struct { @@ -164,6 +176,7 @@ type RoleList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. type ClusterRole struct { @@ -174,6 +187,7 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -188,12 +202,14 @@ type AggregationRule struct { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional + // +listType=atomic ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. @@ -205,14 +221,17 @@ type ClusterRoleBinding struct { // Subjects holds references to the objects the role applies to. 
// +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. + // This field is immutable. RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { @@ -226,6 +245,7 @@ type ClusterRoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 228ee54c0..0471a5594 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ @@ -51,7 +51,7 @@ var map_ClusterRoleBinding = map[string]string{ "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", } func (ClusterRoleBinding) SwaggerDoc() map[string]string { @@ -81,7 +81,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", @@ -105,7 +105,7 @@ var map_RoleBinding = map[string]string{ "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", } func (RoleBinding) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..f6f74413b --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRole) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Role) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 94c1bef8b..ee3c7bfcc 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +// source: k8s.io/api/rbac/v1alpha1/generated.proto package v1alpha1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{0} + return fileDescriptor_758889dfd9a88fa6, []int{0} } func (m *AggregationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_AggregationRule proto.InternalMessageInfo func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{1} + return fileDescriptor_758889dfd9a88fa6, []int{1} } func (m *ClusterRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_ClusterRole proto.InternalMessageInfo func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{2} + return fileDescriptor_758889dfd9a88fa6, []int{2} } func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{3} + return fileDescriptor_758889dfd9a88fa6, []int{3} } func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{4} + return fileDescriptor_758889dfd9a88fa6, []int{4} } func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{5} + return fileDescriptor_758889dfd9a88fa6, []int{5} } func (m *PolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_PolicyRule proto.InternalMessageInfo func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{6} + return fileDescriptor_758889dfd9a88fa6, []int{6} } func (m *Role) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +243,7 @@ var xxx_messageInfo_Role proto.InternalMessageInfo func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{7} + return fileDescriptor_758889dfd9a88fa6, []int{7} } func (m *RoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +271,7 @@ var xxx_messageInfo_RoleBinding proto.InternalMessageInfo 
func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{8} + return fileDescriptor_758889dfd9a88fa6, []int{8} } func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +299,7 @@ var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{9} + return fileDescriptor_758889dfd9a88fa6, []int{9} } func (m *RoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +327,7 @@ var xxx_messageInfo_RoleList proto.InternalMessageInfo func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{10} + return fileDescriptor_758889dfd9a88fa6, []int{10} } func (m *RoleRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +355,7 @@ var xxx_messageInfo_RoleRef proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{11} + return fileDescriptor_758889dfd9a88fa6, []int{11} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,63 +396,63 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) -} - -var fileDescriptor_b59b0bd5e7cb9590 = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, - 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, - 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, - 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, - 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, - 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, - 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, - 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, - 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, - 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, - 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, - 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, - 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, - 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, - 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, - 0xac, 0xfd, 0xea, 0xf5, 
0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, - 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, - 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, - 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, - 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, - 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, - 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, - 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, - 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, - 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, - 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, - 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, - 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, - 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, - 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, - 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, - 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, - 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, - 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, - 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, - 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, - 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, - 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, - 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, - 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, - 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, - 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, - 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, - 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, - 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, - 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, - 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, - 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, - 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_758889dfd9a88fa6) +} + +var fileDescriptor_758889dfd9a88fa6 = []byte{ + // 819 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xec, 0x56, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x09, 0x4d, 0x26, 0x44, 0xa1, 0x43, 0x85, 0xac, 0x0a, 0x39, 0xc5, 0x02, 0xa9, + 0x88, 0x62, 0xd3, 0x82, 0x80, 0x0b, 0x48, 0x75, 0x0f, 0x28, 0x10, 0xda, 0x32, 0x15, 0x3d, 0x20, + 0x0e, 0x4c, 0x9c, 0xa9, 0x33, 0xc4, 0xbf, 0xe4, 0xb1, 0x23, 0x55, 0x5c, 0xb8, 0x70, 0x45, 0x5c, + 0x38, 0x70, 0xe7, 0xca, 0x85, 0x3d, 0xee, 0x3f, 0xd0, 0xbd, 0xf5, 0xd8, 0x53, 0xb4, 0xf5, 0xfe, + 0x21, 0xbb, 0xf2, 0xd8, 0x8e, 0x9d, 0x5f, 0x9b, 0x9c, 0x22, 0xad, 0xb4, 0xa7, 0x64, 0xde, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0x66, 0xde, 0x4b, 0xe0, 0xc1, 0xf0, 0x4b, 0xae, 0x32, 0x57, 0x23, 0x1e, + 0xd3, 0xfc, 0x1e, 0x31, 0xb4, 0xd1, 0x11, 0xb1, 0xbc, 0x01, 0x39, 0xd2, 0x4c, 0xea, 0x50, 0x9f, + 0x04, 0xb4, 0xaf, 0x7a, 0xbe, 0x1b, 0xb8, 0x48, 0x4a, 0x90, 0x2a, 0xf1, 0x98, 0x1a, 0x23, 0xd5, + 0x0c, 0xb9, 0xf7, 0xb1, 0xc9, 0x82, 0x41, 0xd8, 0x53, 0x0d, 0xd7, 0xd6, 0x4c, 0xd7, 0x74, 0x35, + 0x11, 0xd0, 0x0b, 0xaf, 0xc5, 0x49, 0x1c, 0xc4, 0xb7, 0x84, 0x68, 0xef, 0xb3, 0x3c, 0xa5, 0x4d, + 0x8c, 0x01, 0x73, 0xa8, 0x7f, 0xa3, 0x79, 0x43, 0x33, 0x36, 0x70, 0xcd, 0xa6, 0x01, 0xd1, 0x46, + 0x73, 0xe9, 0xf7, 0xb4, 0x65, 0x51, 0x7e, 0xe8, 0x04, 0xcc, 0xa6, 0x73, 0x01, 0x9f, 0xaf, 0x0a, + 0xe0, 0xc6, 0x80, 0xda, 0x64, 0x36, 0x4e, 0xf9, 0x07, 0xc0, 0xd6, 0x89, 0x69, 0xfa, 0xd4, 0x24, + 0x01, 0x73, 0x1d, 0x1c, 0x5a, 0x14, 0xfd, 0x01, 0xe0, 0xae, 0x61, 0x85, 0x3c, 0xa0, 0x3e, 0x76, + 0x2d, 0x7a, 0x49, 0x2d, 0x6a, 0x04, 0xae, 0xcf, 0x25, 0xb0, 0xbf, 0x75, 0xd0, 0x38, 0xfe, 0x54, + 0xcd, 0xb5, 0x99, 0xe4, 0x52, 0xbd, 0xa1, 0x19, 0x1b, 0xb8, 0x1a, 0xb7, 0xa4, 0x8e, 0x8e, 0xd4, + 0x2e, 0xe9, 0x51, 0x2b, 0x8b, 0xd5, 0xdf, 0xbd, 0x1d, 0xb7, 0x4b, 0xd1, 0xb8, 0xbd, 0x7b, 0xba, + 0x80, 0x18, 0x2f, 0x4c, 0xa7, 0xfc, 0x5b, 0x86, 0x8d, 0x02, 0x1c, 0xfd, 0x02, 0x6b, 0x31, 0x79, + 0x9f, 0x04, 0x44, 0x02, 0xfb, 0xe0, 0xa0, 0x71, 0xfc, 0xc9, 0x7a, 0xa5, 0x9c, 0xf7, 0x7e, 0xa5, + 0x46, 0xf0, 0x3d, 0x0d, 0x88, 0x8e, 0xd2, 0x3a, 0x60, 0x6e, 0xc3, 0x13, 0x56, 0xd4, 0x81, 0x55, + 0x3f, 0xb4, 0x28, 0x97, 0xca, 0xa2, 0xd3, 0xf7, 0xd5, 0x65, 0xaf, 0x40, 0xbd, 0x70, 0x2d, 0x66, + 0xdc, 0xc4, 0x72, 0xe9, 0xcd, 0x94, 0xb2, 0x1a, 0x9f, 0x38, 0x4e, 0x18, 0xd0, 0x00, 0xb6, 0xc8, + 0xb4, 0xae, 0xd2, 0x96, 0xa8, 0xf9, 0xc3, 0xe5, 0xa4, 0x33, 0x17, 0xa1, 0xbf, 0x1d, 0x8d, 0xdb, + 0xb3, 0xb7, 0x83, 0x67, 0x69, 0x95, 0xbf, 0xcb, 0x10, 0x15, 0x64, 0xd2, 0x99, 0xd3, 0x67, 0x8e, + 0xb9, 0x01, 0xb5, 0xce, 0x61, 0x8d, 0x87, 0xc2, 0x91, 0x09, 0xf6, 0xde, 0xf2, 0xde, 0x2e, 0x13, + 0xa4, 0xfe, 0x56, 0x4a, 0x59, 0x4b, 0x0d, 0x1c, 0x4f, 0x48, 0x50, 0x17, 0x6e, 0xfb, 0xae, 0x45, + 0x31, 0xbd, 0x4e, 0xb5, 0x7a, 0x09, 0x1f, 0x4e, 0x80, 0x7a, 0x2b, 0xe5, 0xdb, 0x4e, 0x0d, 0x38, + 0xa3, 0x50, 0x9e, 0x00, 0xf8, 0xce, 0xbc, 0x2e, 0x5d, 0xc6, 0x03, 0xf4, 0xf3, 0x9c, 0x36, 0xea, + 0x9a, 0x8f, 0x9a, 0xf1, 0x44, 0x99, 0x49, 0x1b, 0x99, 0xa5, 0xa0, 0xcb, 0x0f, 0xb0, 0xca, 0x02, + 0x6a, 0x67, 0xa2, 0x1c, 0x2e, 0x6f, 0x62, 0xbe, 0xbc, 0xfc, 0x35, 0x75, 0x62, 0x0a, 0x9c, 0x30, + 0x29, 0x8f, 0x01, 0x6c, 0x15, 0xc0, 0x1b, 0x68, 0xe2, 0xdb, 0xe9, 0x26, 0x3e, 0x58, 0xaf, 0x89, + 0xc5, 0xd5, 0x3f, 0x07, 0x10, 0xe6, 0x03, 0x83, 0xda, 0xb0, 0x3a, 0xa2, 0x7e, 0x2f, 0xd9, 0x27, + 0x75, 0xbd, 0x1e, 0xe3, 0xaf, 0x62, 0x03, 0x4e, 0xec, 0xe8, 0x23, 0x58, 0x27, 0x1e, 0xfb, 0xc6, + 0x77, 0x43, 0x8f, 0x4b, 0x5b, 0x02, 0xd4, 0x8c, 0xc6, 0xed, 0xfa, 0xc9, 0x45, 0x27, 0x31, 0xe2, + 0xdc, 0x1f, 0x83, 0x7d, 0xca, 0xdd, 0xd0, 0x37, 0x28, 0x97, 0x2a, 0x39, 0x18, 0x67, 0x46, 0x9c, + 0xfb, 0xd1, 0x17, 0xb0, 0x99, 0x1d, 0xce, 0x88, 0x4d, 0xb9, 0x54, 0x15, 0x01, 0x3b, 
0xd1, 0xb8, + 0xdd, 0xc4, 0x45, 0x07, 0x9e, 0xc6, 0xa1, 0xaf, 0x60, 0xcb, 0x71, 0x9d, 0x0c, 0xf2, 0x23, 0xee, + 0x72, 0xe9, 0x0d, 0x11, 0x2a, 0x66, 0xf4, 0x6c, 0xda, 0x85, 0x67, 0xb1, 0xca, 0x23, 0x00, 0x2b, + 0xaf, 0xdc, 0x0e, 0x53, 0xfe, 0x2c, 0xc3, 0xc6, 0xeb, 0x95, 0x52, 0x58, 0x29, 0xf1, 0x18, 0x6e, + 0x76, 0x97, 0xac, 0x3f, 0x86, 0xab, 0x97, 0xc8, 0x7f, 0x00, 0xd6, 0x36, 0xb4, 0x3d, 0x4e, 0xa7, + 0xcb, 0x96, 0x57, 0x94, 0xbd, 0xb8, 0xde, 0xdf, 0x60, 0x76, 0x03, 0xe8, 0x10, 0xd6, 0xb2, 0x89, + 0x17, 0xd5, 0xd6, 0xf3, 0xec, 0xd9, 0x52, 0xc0, 0x13, 0x04, 0xda, 0x87, 0x95, 0x21, 0x73, 0xfa, + 0x52, 0x59, 0x20, 0xdf, 0x4c, 0x91, 0x95, 0xef, 0x98, 0xd3, 0xc7, 0xc2, 0x13, 0x23, 0x1c, 0x62, + 0x27, 0x3f, 0xc9, 0x05, 0x44, 0x3c, 0xeb, 0x58, 0x78, 0x94, 0xff, 0x01, 0xdc, 0x4e, 0xdf, 0xd3, + 0x84, 0x0f, 0x2c, 0xe5, 0x3b, 0x86, 0x90, 0x78, 0xec, 0x8a, 0xfa, 0x9c, 0xb9, 0x4e, 0x9a, 0x77, + 0xf2, 0xd2, 0x4f, 0x2e, 0x3a, 0xa9, 0x07, 0x17, 0x50, 0xab, 0x6b, 0x40, 0x1a, 0xac, 0xc7, 0x9f, + 0xdc, 0x23, 0x06, 0x95, 0x2a, 0x02, 0xb6, 0x93, 0xc2, 0xea, 0x67, 0x99, 0x03, 0xe7, 0x18, 0xfd, + 0xeb, 0xdb, 0x07, 0xb9, 0x74, 0xf7, 0x20, 0x97, 0xee, 0x1f, 0xe4, 0xd2, 0xef, 0x91, 0x0c, 0x6e, + 0x23, 0x19, 0xdc, 0x45, 0x32, 0xb8, 0x8f, 0x64, 0xf0, 0x34, 0x92, 0xc1, 0x5f, 0xcf, 0xe4, 0xd2, + 0x4f, 0xd2, 0xb2, 0x7f, 0xc1, 0x2f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x02, 0x55, 0xe5, 0x20, + 0x0b, 0x00, 0x00, } func (m *AggregationRule) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 9795cffd9..19d43cdee 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -26,14 +26,15 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "k8s.io/api/rbac/v1alpha1"; // AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole message AggregationRule { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. @@ -41,10 +42,11 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -60,10 +62,11 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. 
// +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. @@ -76,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -87,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -97,25 +100,30 @@ message ClusterRoleList { // about who the rule applies to or which namespace the rule applies to. message PolicyRule { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic repeated string apiGroups = 3; // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic repeated string resources = 4; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic repeated string resourceNames = 5; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic repeated string nonResourceURLs = 6; } @@ -124,10 +132,11 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic repeated PolicyRule rules = 2; } @@ -138,10 +147,11 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -154,7 +164,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -165,7 +175,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 13a0a1f0a..2146b4ce3 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -48,23 +48,28 @@ const ( // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` } @@ -79,7 +84,7 @@ type Subject struct { // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. // +k8s:conversion-gen=false // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -111,6 +116,7 @@ type Role struct { // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -129,6 +135,7 @@ type RoleBinding struct { // Subjects holds references to the objects the role applies to. 
// +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -178,6 +185,7 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -192,6 +200,7 @@ type AggregationRule struct { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional + // +listType=atomic ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } @@ -210,6 +219,7 @@ type ClusterRoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index 46b8b9ee6..6708f3e58 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ @@ -81,7 +81,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index ad5d7cb05..9052d7e8d 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto +// source: k8s.io/api/rbac/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *AggregationRule) Reset() { *m = AggregationRule{} } func (*AggregationRule) ProtoMessage() {} func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{0} + return fileDescriptor_c5bc2d145acd4e45, []int{0} } func (m *AggregationRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,7 +75,7 @@ var xxx_messageInfo_AggregationRule proto.InternalMessageInfo func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{1} + return fileDescriptor_c5bc2d145acd4e45, []int{1} } func (m *ClusterRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +103,7 @@ var xxx_messageInfo_ClusterRole proto.InternalMessageInfo func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{2} + return fileDescriptor_c5bc2d145acd4e45, []int{2} } func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +131,7 @@ var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{3} + return fileDescriptor_c5bc2d145acd4e45, []int{3} } func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +159,7 @@ var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{4} + return fileDescriptor_c5bc2d145acd4e45, []int{4} } func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +187,7 @@ var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{5} + return fileDescriptor_c5bc2d145acd4e45, []int{5} } func (m *PolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +215,7 @@ var xxx_messageInfo_PolicyRule proto.InternalMessageInfo func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{6} + return fileDescriptor_c5bc2d145acd4e45, []int{6} } func (m *Role) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -243,7 +243,7 @@ var xxx_messageInfo_Role proto.InternalMessageInfo func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{7} + return fileDescriptor_c5bc2d145acd4e45, []int{7} } func (m *RoleBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +271,7 @@ var xxx_messageInfo_RoleBinding proto.InternalMessageInfo func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{8} + return fileDescriptor_c5bc2d145acd4e45, []int{8} } func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +299,7 @@ var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{9} + return fileDescriptor_c5bc2d145acd4e45, []int{9} } func (m *RoleList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +327,7 @@ var xxx_messageInfo_RoleList proto.InternalMessageInfo func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{10} + return fileDescriptor_c5bc2d145acd4e45, []int{10} } func (m *RoleRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +355,7 @@ var xxx_messageInfo_RoleRef proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{11} + return fileDescriptor_c5bc2d145acd4e45, []int{11} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -396,62 +396,61 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) -} - -var fileDescriptor_99f6bec96facc83d = []byte{ - // 808 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, - 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, - 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, - 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, - 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, - 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, - 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, - 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, - 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, - 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, - 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, - 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, - 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, - 0xf6, 0x23, 0xe6, 0xd1, 0x95, 
0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, - 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, - 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, - 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, - 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, - 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, - 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, - 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, - 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, - 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, - 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, - 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, - 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, - 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, - 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, - 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, - 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, - 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, - 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, - 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, - 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, - 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, - 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, - 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, - 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, - 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, - 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, - 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, - 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, - 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, - 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, - 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, - 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, - 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, - 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, - 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, - 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 
0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_c5bc2d145acd4e45) +} + +var fileDescriptor_c5bc2d145acd4e45 = []byte{ + // 800 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x3b, 0x6f, 0xe3, 0x46, + 0x10, 0xd6, 0xca, 0x12, 0x2c, 0xae, 0x22, 0x28, 0xde, 0x18, 0x31, 0x61, 0x24, 0x94, 0xa0, 0x04, + 0x88, 0x01, 0x27, 0x64, 0xec, 0x04, 0x49, 0x1a, 0x17, 0x66, 0x8a, 0xc4, 0xb0, 0xa3, 0x18, 0x6b, + 0x24, 0x45, 0x90, 0x22, 0x2b, 0x6a, 0x4d, 0x6f, 0xc4, 0x17, 0xb8, 0xa4, 0x00, 0x23, 0x4d, 0x9a, + 0xeb, 0xae, 0x38, 0xe0, 0xaa, 0x6b, 0xaf, 0xbe, 0xea, 0xca, 0xfb, 0x05, 0x2a, 0x5d, 0xba, 0x12, + 0xce, 0xbc, 0x1f, 0x72, 0x87, 0xe5, 0x43, 0xd4, 0x8b, 0xb6, 0x2a, 0x01, 0x07, 0x5c, 0x25, 0xed, + 0xcc, 0x37, 0xdf, 0xcc, 0x7c, 0xbb, 0x33, 0x12, 0xfc, 0x6a, 0xf0, 0x13, 0x57, 0x99, 0xab, 0x11, + 0x8f, 0x69, 0x7e, 0x8f, 0x18, 0xda, 0xf0, 0xa0, 0x47, 0x03, 0x72, 0xa0, 0x99, 0xd4, 0xa1, 0x3e, + 0x09, 0x68, 0x5f, 0xf5, 0x7c, 0x37, 0x70, 0xd1, 0x4e, 0x02, 0x54, 0x89, 0xc7, 0x54, 0x01, 0x54, + 0x53, 0xe0, 0xee, 0x37, 0x26, 0x0b, 0xae, 0xc2, 0x9e, 0x6a, 0xb8, 0xb6, 0x66, 0xba, 0xa6, 0xab, + 0xc5, 0xf8, 0x5e, 0x78, 0x19, 0x9f, 0xe2, 0x43, 0xfc, 0x2d, 0xe1, 0xd9, 0xfd, 0x3e, 0x4f, 0x68, + 0x13, 0xe3, 0x8a, 0x39, 0xd4, 0xbf, 0xd6, 0xbc, 0x81, 0x29, 0x0c, 0x5c, 0xb3, 0x69, 0x40, 0xb4, + 0xe1, 0x42, 0xf6, 0x5d, 0xad, 0x28, 0xca, 0x0f, 0x9d, 0x80, 0xd9, 0x74, 0x21, 0xe0, 0x87, 0x87, + 0x02, 0xb8, 0x71, 0x45, 0x6d, 0x32, 0x1f, 0xd7, 0x79, 0x06, 0x60, 0xf3, 0xd8, 0x34, 0x7d, 0x6a, + 0x92, 0x80, 0xb9, 0x0e, 0x0e, 0x2d, 0x8a, 0x1e, 0x01, 0xb8, 0x6d, 0x58, 0x21, 0x0f, 0xa8, 0x8f, + 0x5d, 0x8b, 0x5e, 0x50, 0x8b, 0x1a, 0x81, 0xeb, 0x73, 0x19, 0xb4, 0x37, 0xf6, 0xea, 0x87, 0xdf, + 0xa9, 0xb9, 0x34, 0x93, 0x5c, 0xaa, 0x37, 0x30, 0x85, 0x81, 0xab, 0xa2, 0x25, 0x75, 0x78, 0xa0, + 0x9e, 0x91, 0x1e, 0xb5, 0xb2, 0x58, 0xfd, 0xb3, 0xd1, 0xb8, 0x55, 0x8a, 0xc6, 0xad, 0xed, 0x9f, + 0x97, 0x10, 0xe3, 0xa5, 0xe9, 0x3a, 0xcf, 0xcb, 0xb0, 0x3e, 0x05, 0x47, 0xff, 0xc0, 0x9a, 0x20, + 0xef, 0x93, 0x80, 0xc8, 0xa0, 0x0d, 0xf6, 0xea, 0x87, 0xdf, 0xae, 0x56, 0xca, 0xef, 0xbd, 0x7f, + 0xa9, 0x11, 0xfc, 0x46, 0x03, 0xa2, 0xa3, 0xb4, 0x0e, 0x98, 0xdb, 0xf0, 0x84, 0x15, 0xfd, 0x0a, + 0xab, 0x7e, 0x68, 0x51, 0x2e, 0x97, 0xe3, 0x4e, 0xbf, 0x50, 0x0b, 0x1e, 0x81, 0x7a, 0xee, 0x5a, + 0xcc, 0xb8, 0x16, 0x6a, 0xe9, 0x8d, 0x94, 0xb1, 0x2a, 0x4e, 0x1c, 0x27, 0x04, 0xc8, 0x84, 0x4d, + 0x32, 0x2b, 0xab, 0xbc, 0x11, 0x97, 0xbc, 0x57, 0xc8, 0x39, 0x77, 0x0d, 0xfa, 0x27, 0xd1, 0xb8, + 0x35, 0x7f, 0x37, 0x78, 0x9e, 0xb5, 0xf3, 0xb4, 0x0c, 0xd1, 0x94, 0x48, 0x3a, 0x73, 0xfa, 0xcc, + 0x31, 0xd7, 0xa0, 0x55, 0x17, 0xd6, 0x78, 0x18, 0x3b, 0x32, 0xb9, 0xda, 0x85, 0xad, 0x5d, 0x24, + 0x40, 0xfd, 0xe3, 0x94, 0xb1, 0x96, 0x1a, 0x38, 0x9e, 0x70, 0xa0, 0x53, 0xb8, 0xe9, 0xbb, 0x16, + 0xc5, 0xf4, 0x32, 0x55, 0xaa, 0x98, 0x0e, 0x27, 0x38, 0xbd, 0x99, 0xd2, 0x6d, 0xa6, 0x06, 0x9c, + 0x31, 0x74, 0x46, 0x00, 0x7e, 0xba, 0xa8, 0xca, 0x19, 0xe3, 0x01, 0xfa, 0x7b, 0x41, 0x19, 0x75, + 0xc5, 0x07, 0xcd, 0x78, 0xa2, 0xcb, 0xa4, 0x8b, 0xcc, 0x32, 0xa5, 0xca, 0x39, 0xac, 0xb2, 0x80, + 0xda, 0x99, 0x24, 0xfb, 0x85, 0x3d, 0x2c, 0x56, 0x97, 0xbf, 0xa4, 0x13, 0xc1, 0x80, 0x13, 0xa2, + 0xce, 0x2b, 0x00, 0x9b, 0x53, 0xe0, 0x35, 0xf4, 0x70, 0x32, 0xdb, 0xc3, 0x97, 0x2b, 0xf5, 0xb0, + 0xbc, 0xf8, 0xb7, 0x00, 0xc2, 0x7c, 0x56, 0x50, 0x0b, 0x56, 0x87, 0xd4, 0xef, 0x25, 0x9b, 0x44, + 0xd2, 0x25, 0x81, 0xff, 0x53, 
0x18, 0x70, 0x62, 0x47, 0xfb, 0x50, 0x22, 0x1e, 0xfb, 0xc5, 0x77, + 0x43, 0x2f, 0x49, 0x2f, 0xe9, 0x8d, 0x68, 0xdc, 0x92, 0x8e, 0xcf, 0x4f, 0x12, 0x23, 0xce, 0xfd, + 0x02, 0xec, 0x53, 0xee, 0x86, 0xbe, 0x41, 0xb9, 0xbc, 0x91, 0x83, 0x71, 0x66, 0xc4, 0xb9, 0x1f, + 0xfd, 0x08, 0x1b, 0xd9, 0xa1, 0x4b, 0x6c, 0xca, 0xe5, 0x4a, 0x1c, 0xb0, 0x15, 0x8d, 0x5b, 0x0d, + 0x3c, 0xed, 0xc0, 0xb3, 0x38, 0x74, 0x04, 0x9b, 0x8e, 0xeb, 0x64, 0x90, 0x3f, 0xf0, 0x19, 0x97, + 0xab, 0x71, 0x68, 0x3c, 0x9f, 0xdd, 0x59, 0x17, 0x9e, 0xc7, 0x76, 0x5e, 0x02, 0x58, 0x79, 0xdf, + 0xb6, 0x57, 0xe7, 0x71, 0x19, 0xd6, 0x3f, 0x6c, 0x93, 0xc9, 0x36, 0x11, 0x23, 0xb8, 0xde, 0x35, + 0xb2, 0xf2, 0x08, 0x3e, 0xbc, 0x3f, 0x5e, 0x00, 0x58, 0x5b, 0xd3, 0xe2, 0xd0, 0x67, 0xab, 0xfe, + 0xfc, 0xfe, 0xaa, 0x97, 0x97, 0xfb, 0x1f, 0xcc, 0xf4, 0x47, 0x5f, 0xc3, 0x5a, 0x36, 0xec, 0x71, + 0xb1, 0x52, 0x9e, 0x3c, 0xdb, 0x07, 0x78, 0x82, 0x40, 0x6d, 0x58, 0x19, 0x30, 0xa7, 0x2f, 0x97, + 0x63, 0xe4, 0x47, 0x29, 0xb2, 0x72, 0xca, 0x9c, 0x3e, 0x8e, 0x3d, 0x02, 0xe1, 0x10, 0x3b, 0xf9, + 0x21, 0x9e, 0x42, 0x88, 0x31, 0xc7, 0xb1, 0x47, 0x68, 0xb5, 0x99, 0x3e, 0xa6, 0x09, 0x1f, 0x28, + 0xe4, 0x9b, 0xae, 0xaf, 0xbc, 0x4a, 0x7d, 0xf7, 0x67, 0x47, 0x1a, 0x94, 0xc4, 0x27, 0xf7, 0x88, + 0x41, 0xe5, 0x4a, 0x0c, 0xdb, 0x4a, 0x61, 0x52, 0x37, 0x73, 0xe0, 0x1c, 0xa3, 0x1f, 0x8d, 0xee, + 0x94, 0xd2, 0xcd, 0x9d, 0x52, 0xba, 0xbd, 0x53, 0x4a, 0xff, 0x47, 0x0a, 0x18, 0x45, 0x0a, 0xb8, + 0x89, 0x14, 0x70, 0x1b, 0x29, 0xe0, 0x75, 0xa4, 0x80, 0x27, 0x6f, 0x94, 0xd2, 0x5f, 0x3b, 0x05, + 0x7f, 0x79, 0xdf, 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0xfb, 0x5a, 0x79, 0x0c, 0x0b, 0x00, 0x00, } func (m *AggregationRule) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 53c252554..8bfbd0c8a 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -26,14 +26,15 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/rbac/v1beta1"; // AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole message AggregationRule { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. @@ -41,10 +42,11 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -60,10 +62,11 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can only reference a ClusterRole in the global namespace. @@ -76,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -87,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -97,26 +100,31 @@ message ClusterRoleList { // about who the rule applies to or which namespace the rule applies to. message PolicyRule { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic repeated string verbs = 1; // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic repeated string apiGroups = 2; // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic repeated string resources = 3; // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic repeated string resourceNames = 4; // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic repeated string nonResourceURLs = 5; } @@ -125,10 +133,11 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic repeated PolicyRule rules = 2; } @@ -139,10 +148,11 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic repeated Subject subjects = 2; // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
@@ -155,7 +165,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -166,7 +176,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 96e6b18f5..9cfaaceb9 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -48,24 +48,29 @@ const ( // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. + // +listType=atomic Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. + // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. // +optional + // +listType=atomic APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. // +optional + // +listType=atomic Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. // +optional + // +listType=atomic ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional + // +listType=atomic NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` } @@ -79,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. 
If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -115,6 +120,7 @@ type Role struct { // Rules holds all the PolicyRules for this Role // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -137,6 +143,7 @@ type RoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. @@ -198,6 +205,7 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole // +optional + // +listType=atomic Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be @@ -211,6 +219,7 @@ type AggregationRule struct { // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. // If any of the selectors match, then the ClusterRole's permissions will be added // +optional + // +listType=atomic ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } @@ -233,6 +242,7 @@ type ClusterRoleBinding struct { // Subjects holds references to the objects the role applies to. // +optional + // +listType=atomic Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` // RoleRef can only reference a ClusterRole in the global namespace. diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 5d57cb348..fff1fe40f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ @@ -81,7 +81,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. 
'*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", diff --git a/vendor/k8s.io/api/resource/v1alpha3/doc.go b/vendor/k8s.io/api/resource/v1alpha3/doc.go new file mode 100644 index 000000000..aeb66561f --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package + +// +groupName=resource.k8s.io + +// Package v1alpha3 is the v1alpha3 version of the resource API. +package v1alpha3 // import "k8s.io/api/resource/v1alpha3" diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go new file mode 100644 index 000000000..4ac01cc6f --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -0,0 +1,8987 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/resource/v1alpha3/generated.proto + +package v1alpha3 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{0} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *BasicDevice) Reset() { *m = BasicDevice{} } +func (*BasicDevice) ProtoMessage() {} +func (*BasicDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{1} +} +func (m *BasicDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BasicDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicDevice.Merge(m, src) +} +func (m *BasicDevice) XXX_Size() int { + return m.Size() +} +func (m *BasicDevice) XXX_DiscardUnknown() { + xxx_messageInfo_BasicDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicDevice proto.InternalMessageInfo + +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{2} +} +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) +} +func (m *CELDeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo + +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{3} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func 
(*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{4} +} +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src) +} +func (m *DeviceAllocationConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo + +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{5} +} +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) +} +func (m *DeviceAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo + +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{6} +} +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttribute.Merge(m, src) +} +func (m *DeviceAttribute) XXX_Size() int { + return m.Size() +} +func (m *DeviceAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo + +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{7} +} +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) +} +func (m *DeviceClaim) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo + +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func 
(*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{8} +} +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) +} +func (m *DeviceClaimConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo + +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{9} +} +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) +} +func (m *DeviceClass) XXX_Size() int { + return m.Size() +} +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo + +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) ProtoMessage() {} +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{10} +} +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) +} +func (m *DeviceClassConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo + +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{11} +} +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) +} +func (m *DeviceClassList) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo + +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } 
+func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{12} +} +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) +} +func (m *DeviceClassSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{13} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) ProtoMessage() {} +func (*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{14} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{15} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func 
(*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{16} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{17} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{18} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo + +func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } +func (*PodSchedulingContext) ProtoMessage() {} +func (*PodSchedulingContext) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{19} +} +func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContext.Merge(m, src) +} +func (m *PodSchedulingContext) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) +} + +var 
xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo + +func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } +func (*PodSchedulingContextList) ProtoMessage() {} +func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{20} +} +func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextList.Merge(m, src) +} +func (m *PodSchedulingContextList) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo + +func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } +func (*PodSchedulingContextSpec) ProtoMessage() {} +func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{21} +} +func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) +} +func (m *PodSchedulingContextSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo + +func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } +func (*PodSchedulingContextStatus) ProtoMessage() {} +func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{22} +} +func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) +} +func (m *PodSchedulingContextStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{23} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{24} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{25} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } +func (*ResourceClaimSchedulingStatus) ProtoMessage() {} +func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{26} +} +func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src) +} +func (m *ResourceClaimSchedulingStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{27} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{28} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{29} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{30} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return 
fileDescriptor_66649ee9bbcd89d2, []int{31} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{32} +} +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePool.Merge(m, src) +} +func (m *ResourcePool) XXX_Size() int { + return m.Size() +} +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo + +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{33} +} +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) +} +func (m *ResourceSlice) XXX_Size() int { + return m.Size() +} +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo + +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{34} +} +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) +} +func (m *ResourceSliceList) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo + +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{35} +} +func (m *ResourceSliceSpec) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) +} +func (m *ResourceSliceSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult") + proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.DeviceAttribute") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1alpha3.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassList") + proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration") + proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContext") + proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextList") + proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextSpec") + proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextStatus") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList") + 
proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSchedulingStatus") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1alpha3.ResourcePool") + proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha3.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2) +} + +var fileDescriptor_66649ee9bbcd89d2 = []byte{ + // 2085 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, + 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, + 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, + 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, + 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, + 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, + 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, + 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, + 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, + 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, + 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, + 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78, + 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, + 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, + 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, + 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, + 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, + 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, + 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, + 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, + 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, + 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, + 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a, + 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 
0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, + 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, + 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, + 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, + 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, + 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, + 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, + 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, + 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, + 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, + 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, + 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, + 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, + 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, + 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, + 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, + 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3, + 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, + 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, + 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, + 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, + 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6, + 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, + 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, + 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85, + 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, + 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, + 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, + 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, + 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, + 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, + 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, + 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, + 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, + 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, + 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, + 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 
0xf0, 0xfd, 0x6c, 0x30, + 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, + 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, + 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, + 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, + 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, + 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 0x2a, 0x18, 0xdd, + 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, + 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, + 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, + 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, + 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, + 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, + 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, + 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, + 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, + 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12, + 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, + 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, + 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, + 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, + 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0, + 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, + 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, + 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, + 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, + 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, + 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, + 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65, + 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, + 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, + 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, + 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, + 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, + 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, + 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, + 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, + 
0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, + 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, + 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, + 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, + 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, + 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, + 0x91, 0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, + 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, + 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, + 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, + 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, + 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, + 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, + 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, + 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, + 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, + 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, + 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, + 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, + 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, + 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, + 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, + 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, + 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, + 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, + 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, + 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, + 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, + 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, + 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, + 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, + 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, + 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, + 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, + 0x20, 0x64, 0x20, 0x00, 0x00, +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Controller) + copy(dAtA[i:], m.Controller) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) + i-- + dAtA[i] = 0x22 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BasicDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + 
copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Basic != nil { + { + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VersionValue != nil { + i -= len(*m.VersionValue) + copy(dAtA[i:], *m.VersionValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) + i-- + dAtA[i] = 0x2a + } + if m.StringValue != nil { + i -= len(*m.StringValue) + copy(dAtA[i:], *m.StringValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) + i-- + dAtA[i] = 0x22 + } + if m.BoolValue != nil { + i-- + if *m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClass) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SuitableNodes != nil { + { + size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m 
*DeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequestAllocationResult) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x22 + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x1a + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PotentialNodes) > 0 { + for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PotentialNodes[iNdEx]) + copy(dAtA[i:], m.PotentialNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.SelectedNode) + copy(dAtA[i:], m.SelectedNode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceClaims) > 0 { + for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + 
i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.UnsuitableNodes) > 0 { + for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UnsuitableNodes[iNdEx]) + copy(dAtA[i:], m.UnsuitableNodes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Controller) + copy(dAtA[i:], m.Controller) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.DeallocationRequested { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Allocation != nil { + { + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourcePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Controller) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BasicDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CELDeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceAllocationConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + 
l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SuitableNodes != nil { + l = m.SuitableNodes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Opaque != nil { + l = m.Opaque.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if 
len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + n += 2 + return n +} + +func (m *DeviceRequestAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CEL != nil { + l = m.CEL.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *OpaqueDeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSchedulingContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSchedulingContextList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSchedulingContextSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SelectedNode) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PotentialNodes) > 0 { + for _, s := range m.PotentialNodes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSchedulingContextStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceClaims) > 0 { + for _, e := range m.ResourceClaims { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSchedulingStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UnsuitableNodes) > 0 { + for _, s := range m.UnsuitableNodes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Controller) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourcePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + return n +} + +func (m *ResourceSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceSliceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Pool.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllocationResult{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, + `}`, + }, "") + return s +} +func (this *BasicDevice) String() string { + if this == nil { + return "nil" + } + keysForAttributes := make([]string, 0, len(this.Attributes)) + for k := range this.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + mapStringForAttributes := "map[QualifiedName]DeviceAttribute{" + for _, k := range keysForAttributes { + mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)]) + } + mapStringForAttributes += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "map[QualifiedName]resource.Quantity{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&BasicDevice{`, + `Attributes:` + mapStringForAttributes + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *CELDeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CELDeviceSelector{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAllocationConfiguration{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]DeviceRequestAllocationResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + "," + } + repeatedStringForResults += "}" + repeatedStringForConfig := "[]DeviceAllocationConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceAllocationResult{`, + `Results:` + repeatedStringForResults + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAttribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAttribute{`, + `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, + `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, + `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, + `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaim) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]DeviceRequest{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForRequests += "}" + repeatedStringForConstraints := 
"[]DeviceConstraint{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForConfig := "[]DeviceClaimConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClaim{`, + `Requests:` + repeatedStringForRequests + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaimConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClaimConfiguration{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClassConfiguration{`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeviceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForConfig := "[]DeviceClassConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClassSpec{`, + `Selectors:` + repeatedStringForSelectors + `,`, + `Config:` + repeatedStringForConfig + `,`, + `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeviceConfiguration{`, + `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConstraint{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&DeviceRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `AdminAccess:` + fmt.Sprintf("%v", this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequestAllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceRequestAllocationResult{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceSelector{`, + `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *OpaqueDeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingContext{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContextList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodSchedulingContext{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodSchedulingContextList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} 
+func (this *PodSchedulingContextSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSchedulingContextSpec{`, + `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, + `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, + `}`, + }, "") + return s +} +func (this *PodSchedulingContextStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{" + for _, f := range this.ResourceClaims { + repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceClaims += "}" + s := strings.Join([]string{`&PodSchedulingContextStatus{`, + `ResourceClaims:` + repeatedStringForResourceClaims + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimConsumerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimConsumerReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSchedulingStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSpec{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, + `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" + for _, f := range this.ReservedFor { + repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForReservedFor += "}" + s := 
strings.Join([]string{`&ResourceClaimStatus{`, + `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, + `ReservedFor:` + repeatedStringForReservedFor + `,`, + `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaimTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePool{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSlice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDevices := "[]Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += 
"}" + s := strings.Join([]string{`&ResourceSliceSpec{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, + `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v1.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BasicDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Basic == nil { + m.Basic = &BasicDevice{} + } + if err := 
m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = 
append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SuitableNodes == nil { + m.SuitableNodes = &v1.NodeSelector{} + } + if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AdminAccess = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContext: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSchedulingContext{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelectedNode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) + if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeallocationRequested = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType) + } + m.ResourceSliceCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourceSliceCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v1.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllNodes = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/vendor/k8s.io/api/resource/v1alpha3/generated.proto new file mode 100644 index 000000000..b4428ad45 --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -0,0 +1,912 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.resource.v1alpha3; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/resource/v1alpha3"; + +// AllocationResult contains attributes of an allocated resource. +message AllocationResult { + // Devices is the result of allocating devices. + // + // +optional + optional DeviceAllocationResult devices = 1; + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. 
+ // + // +optional + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; + + // Controller is the name of the DRA driver which handled the + // allocation. That driver is also responsible for deallocating the + // claim. It is empty when the claim can be deallocated without + // involving a driver. + // + // A driver may allocate devices provided by other drivers, so this + // driver name here can be different from the driver names listed for + // the results. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional string controller = 4; +} + +// BasicDevice defines one device instance. +message BasicDevice { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map<string, DeviceAttribute> attributes = 1; + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 2; +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +message CELDeviceSelector { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com"). + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them.
+ // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // +required + optional string expression = 1; +} + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +message Device { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + optional string name = 1; + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + optional BasicDevice basic = 2; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + optional string source = 1; + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 2; + + optional DeviceConfiguration deviceConfiguration = 3; +} + +// DeviceAllocationResult is the result of allocating devices. +message DeviceAllocationResult { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + repeated DeviceRequestAllocationResult results = 1; + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + repeated DeviceAllocationConfiguration config = 2; +} + +// DeviceAttribute must have exactly one field set. +message DeviceAttribute { + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + optional int64 int = 2; + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + optional bool bool = 3; + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string string = 4; + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string version = 5; +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +message DeviceClaim { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + repeated DeviceRequest requests = 1; + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + repeated DeviceConstraint constraints = 2; + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. 
+ // + // +optional + // +listType=atomic + repeated DeviceClaimConfiguration config = 3; +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +message DeviceClaimConfiguration { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + optional DeviceConfiguration deviceConfiguration = 2; +} + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message DeviceClass { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + optional DeviceClassSpec spec = 2; +} + +// DeviceClassConfiguration is used in DeviceClass. +message DeviceClassConfiguration { + optional DeviceConfiguration deviceConfiguration = 1; +} + +// DeviceClassList is a collection of classes. +message DeviceClassList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated DeviceClass items = 2; +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +message DeviceClassSpec { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 1; + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + repeated DeviceClassConfiguration config = 2; + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a claim that has not been allocated yet *and* that claim + // gets allocated through a control plane controller. It is ignored + // when the claim does not use a control plane controller + // for allocation. + // + // Setting this field is optional. If unset, all Nodes are candidates. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional .k8s.io.api.core.v1.NodeSelector suitableNodes = 3; +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +message DeviceConfiguration { + // Opaque provides driver-specific configuration parameters. 
+ // + // +optional + // +oneOf=ConfigurationType + optional OpaqueDeviceConfiguration opaque = 1; +} + +// DeviceConstraint must have exactly one field set besides Requests. +message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + optional string matchAttribute = 2; +} + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +message DeviceRequest { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + optional string deviceClassName = 2; + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 3; + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. 
+ // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + optional string allocationMode = 4; + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + optional int64 count = 5; + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // +optional + // +default=false + optional bool adminAccess = 6; +} + +// DeviceRequestAllocationResult contains the allocation result for one request. +message DeviceRequestAllocationResult { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + optional string request = 1; + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 2; + + // This name together with the driver name and the device name field + // identify which device was allocated (`<driver name>/<pool name>/<device name>`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 3; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 4; +} + +// DeviceSelector must have exactly one field set. +message DeviceSelector { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + optional CELDeviceSelector cel = 1; +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +message OpaqueDeviceConfiguration { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // +required + optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; +} + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DRAControlPlaneController +// feature gate.
+message PodSchedulingContext { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes where resources for the Pod are needed. + optional PodSchedulingContextSpec spec = 2; + + // Status describes where resources for the Pod can be allocated. + // + // +optional + optional PodSchedulingContextStatus status = 3; +} + +// PodSchedulingContextList is a collection of Pod scheduling objects. +message PodSchedulingContextList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of PodSchedulingContext objects. + repeated PodSchedulingContext items = 2; +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. +message PodSchedulingContextSpec { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // + // +optional + optional string selectedNode = 1; + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + repeated string potentialNodes = 2; +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +message PodSchedulingContextStatus { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. + // + // +listType=map + // +listMapKey=name + // +optional + repeated ResourceClaimSchedulingStatus resourceClaims = 1; +} + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaim { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + optional ResourceClaimSpec spec = 2; + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + optional ResourceClaimStatus status = 3; +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +message ResourceClaimConsumerReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Resource is the type of resource being referenced, for example "pods". + // +required + optional string resource = 3; + + // Name is the name of resource being referenced. + // +required + optional string name = 4; + + // UID identifies exactly one incarnation of the resource. 
+ // +required + optional string uid = 5; +} + +// ResourceClaimList is a collection of claims. +message ResourceClaimList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claims. + repeated ResourceClaim items = 2; +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +message ResourceClaimSchedulingStatus { + // Name matches the pod.spec.resourceClaims[*].Name field. + // + // +required + optional string name = 1; + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + repeated string unsuitableNodes = 2; +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +message ResourceClaimSpec { + // Devices defines how to request devices. + // + // +optional + optional DeviceClaim devices = 1; + + // Controller is the name of the DRA driver that is meant + // to handle allocation of this claim. If empty, allocation is handled + // by the scheduler while scheduling a pod. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional string controller = 2; +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +message ResourceClaimStatus { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + optional AllocationResult allocation = 1; + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + repeated ResourceClaimConsumerReference reservedFor = 2; + + // Indicates that a claim is to be deallocated. While this is set, + // no new consumers may be added to ReservedFor. + // + // This is only used if the claim needs to be deallocated by a DRA driver. + // That driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. 
+ // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + optional bool deallocationRequested = 3; +} + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaimTemplate { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + optional ResourceClaimTemplateSpec spec = 2; +} + +// ResourceClaimTemplateList is a collection of claim templates. +message ResourceClaimTemplateList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claim templates. + repeated ResourceClaimTemplate items = 2; +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +message ResourceClaimTemplateSpec { + // ObjectMeta may contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + optional ResourceClaimSpec spec = 2; +} + +// ResourcePool describes the pool that ResourceSlices belong to. +message ResourcePool { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + optional string name = 1; + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + optional int64 generation = 2; + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + optional int64 resourceSliceCount = 3; +} + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. 
+// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple <driver name>, <pool name>, <device name>. +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceSlice { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + optional ResourceSliceSpec spec = 2; +} + +// ResourceSliceList is a collection of ResourceSlices. +message ResourceSliceList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource ResourceSlices. + repeated ResourceSlice items = 2; +} + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +message ResourceSliceSpec { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + optional string driver = 1; + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + optional ResourcePool pool = 2; + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + optional string nodeName = 3; + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4; + + // AllNodes indicates that all nodes have access to the resources in the pool.
+ // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional bool allNodes = 5; + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + repeated Device devices = 6; +} + diff --git a/vendor/k8s.io/api/resource/v1alpha3/register.go b/vendor/k8s.io/api/resource/v1alpha3/register.go new file mode 100644 index 000000000..74044e8cf --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "resource.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha3"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DeviceClass{}, + &DeviceClassList{}, + &ResourceClaim{}, + &ResourceClaimList{}, + &ResourceClaimTemplate{}, + &ResourceClaimTemplateList{}, + &PodSchedulingContext{}, + &PodSchedulingContextList{}, + &ResourceSlice{}, + &ResourceSliceList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/resource/v1alpha3/types.go b/vendor/k8s.io/api/resource/v1alpha3/types.go new file mode 100644 index 000000000..4efd2491d --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -0,0 +1,1048 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha3 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + // Finalizer is the finalizer that gets set for claims + // which were allocated through a builtin controller. + // Reserved for use by Kubernetes, DRA driver controllers must + // use their own finalizer. + Finalizer = "resource.kubernetes.io/delete-protection" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple <driver name>, <pool name>, <device name>. +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +const ( + // ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.NodeName]. + ResourceSliceSelectorNodeName = "spec.nodeName" + // ResourceSliceSelectorDriver can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.Driver]. + ResourceSliceSelectorDriver = "spec.driver" +) + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +type ResourceSliceSpec struct { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable.
+ // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"` + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"` + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"` + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"` + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` +} + +// ResourcePool describes the pool that ResourceSlices belong to. +type ResourcePool struct { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"` + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. 
+ // + // +required + ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"` +} + +const ResourceSliceMaxSharedCapacity = 128 +const ResourceSliceMaxDevices = 128 +const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name. + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +type Device struct { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"` +} + +// BasicDevice defines one device instance. +type BasicDevice struct { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"` + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Capacity map[QualifiedName]resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"` +} + +// Limit for the sum of the number of entries in both ResourceSlices. +const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32 + +// QualifiedName is the name of a device attribute or capacity. +// +// Attributes and capacities are defined either by the owner of the specific +// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes +// project). Because they are sometimes compared across devices, a given name +// is expected to mean the same thing and have the same type on all devices. +// +// Names must be either a C identifier (e.g. "theName") or a DNS subdomain +// followed by a slash ("/") followed by a C identifier +// (e.g. "dra.example.com/theName"). Names which do not include the +// domain prefix are assumed to be part of the driver's domain. Attributes +// or capacities defined by 3rd parties must include the domain prefix. +// +// The maximum length for the DNS subdomain is 63 characters (same as +// for driver names) and the maximum length of the C identifier +// is 32. +type QualifiedName string + +// FullyQualifiedName is a QualifiedName where the domain is set. +type FullyQualifiedName string + +// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`<domain>/<ID>`). +const DeviceMaxIDLength = 32 + +// DeviceAttribute must have exactly one field set. +type DeviceAttribute struct { + // The Go field names below have a Value suffix to avoid a conflict between the + // field "String" and the corresponding method. That method is required. + // The Kubernetes API is defined without that suffix to keep it more natural. + + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"` + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"` + + // StringValue is a string.
Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"` + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"` +} + +// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value. +const DeviceAttributeMaxValueLength = 64 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ResourceSliceList is a collection of ResourceSlices. +type ResourceSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource ResourceSlices. + Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +type ResourceClaimSpec struct { + // Devices defines how to request devices. + // + // +optional + Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` + + // Controller is the name of the DRA driver that is meant + // to handle allocation of this claim. If empty, allocation is handled + // by the scheduler while scheduling a pod. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +type DeviceClaim struct { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"` + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. 
+ // + // +optional + // +listType=atomic + Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"` + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"` +} + +const ( + DeviceRequestsMaxSize = AllocationResultsMaxSize + DeviceConstraintsMaxSize = 32 + DeviceConfigMaxSize = 32 +) + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +type DeviceRequest struct { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"` + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"` + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"` + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. 
+ // + // +optional + // +oneOf=AllocationMode + Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"` + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // +optional + // +default=false + AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` +} + +const ( + DeviceSelectorsMaxSize = 32 +) + +type DeviceAllocationMode string + +// Valid [DeviceRequest.CountMode] values. +const ( + DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount") + DeviceAllocationModeAll = DeviceAllocationMode("All") +) + +// DeviceSelector must have exactly one field set. +type DeviceSelector struct { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"` +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +type CELDeviceSelector struct { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. 
For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // +required + Expression string `json:"expression" protobuf:"bytes,1,name=expression"` +} + +// DeviceConstraint must have exactly one field set besides Requests. +type DeviceConstraint struct { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"` +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +type DeviceClaimConfiguration struct { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"` +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +type DeviceConfiguration struct { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"` +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +type OpaqueDeviceConfiguration struct { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // +required + Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"` +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. 
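// Illustrative sketch, not part of the vendored file: how the request,
// selector and constraint types defined above compose into a ResourceClaimSpec.
// The device class name, driver domain and the "model"/"numa" attribute names
// are hypothetical placeholders, not values defined by this API.
func exampleClaimSpec() ResourceClaimSpec {
	numa := FullyQualifiedName("gpu.example.com/numa")
	return ResourceClaimSpec{
		Devices: DeviceClaim{
			Requests: []DeviceRequest{{
				// Referenced from pod.spec.containers[].resources.claims and from constraints.
				Name:            "gpu",
				DeviceClassName: "gpu.example.com",
				AllocationMode:  DeviceAllocationModeExactCount,
				Count:           1,
				Selectors: []DeviceSelector{{
					CEL: &CELDeviceSelector{
						// Assumes the driver publishes a string attribute "model" under its own domain.
						Expression: `device.driver == "gpu.example.com" && device.attributes["gpu.example.com"].model == "a100"`,
					},
				}},
			}},
			Constraints: []DeviceConstraint{{
				// All devices allocated for this claim must report the same value for this attribute.
				MatchAttribute: &numa,
			}},
		},
	}
}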
+type ResourceClaimStatus struct { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"` + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 32 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` + + // Indicates that a claim is to be deallocated. While this is set, + // no new consumers may be added to ReservedFor. + // + // This is only used if the claim needs to be deallocated by a DRA driver. + // That driver then must deallocate this claim and reset the field + // together with clearing the Allocation field. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` +} + +// ReservedForMaxSize is the maximum number of entries in +// claim.status.reservedFor. +const ResourceClaimReservedForMaxSize = 32 + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +type ResourceClaimConsumerReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the type of resource being referenced, for example "pods". + // +required + Resource string `json:"resource" protobuf:"bytes,3,name=resource"` + // Name is the name of resource being referenced. + // +required + Name string `json:"name" protobuf:"bytes,4,name=name"` + // UID identifies exactly one incarnation of the resource. + // +required + UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` +} + +// AllocationResult contains attributes of an allocated resource. +type AllocationResult struct { + // Devices is the result of allocating devices. 
+ // + // +optional + Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"` + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` + + // Controller is the name of the DRA driver which handled the + // allocation. That driver is also responsible for deallocating the + // claim. It is empty when the claim can be deallocated without + // involving a driver. + // + // A driver may allocate devices provided by other drivers, so this + // driver name here can be different from the driver names listed for + // the results. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` +} + +// DeviceAllocationResult is the result of allocating devices. +type DeviceAllocationResult struct { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"` + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// AllocationResultsMaxSize represents the maximum number of +// entries in allocation.devices.results. +const AllocationResultsMaxSize = 32 + +// DeviceRequestAllocationResult contains the allocation result for one request. +type DeviceRequestAllocationResult struct { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + Request string `json:"request" protobuf:"bytes,1,name=request"` + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,2,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`<driver name>/<pool name>/<device name>`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,3,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,4,name=device"` +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +type DeviceAllocationConfiguration struct { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim.
+ // + // +required + Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"` + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"` +} + +type AllocationConfigSource string + +// Valid [DeviceAllocationConfiguration.Source] values. +const ( + AllocationConfigSourceClass = "FromClass" + AllocationConfigSourceClaim = "FromClaim" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimList is a collection of claims. +type ResourceClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claims. + Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContext objects hold information that is needed to schedule +// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation +// mode. +// +// This is an alpha type and requires enabling the DRAControlPlaneController +// feature gate. +type PodSchedulingContext struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes where resources for the Pod are needed. + Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes where resources for the Pod can be allocated. + // + // +optional + Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodSchedulingContextSpec describes where resources for the Pod are needed. +type PodSchedulingContextSpec struct { + // SelectedNode is the node for which allocation of ResourceClaims that + // are referenced by the Pod and that use "WaitForFirstConsumer" + // allocation is to be attempted. + // + // +optional + SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` + + // PotentialNodes lists nodes where the Pod might be able to run. + // + // The size of this field is limited to 128. This is large enough for + // many clusters. Larger clusters may need more attempts to find a node + // that suits all pending resources. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` +} + +// PodSchedulingContextStatus describes where resources for the Pod can be allocated. +type PodSchedulingContextStatus struct { + // ResourceClaims describes resource availability for each + // pod.spec.resourceClaim entry where the corresponding ResourceClaim + // uses "WaitForFirstConsumer" allocation mode. 
+ // + // +listType=map + // +listMapKey=name + // +optional + ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` + + // If there ever is a need to support other kinds of resources + // than ResourceClaim, then new fields could get added here + // for those other resources. +} + +// ResourceClaimSchedulingStatus contains information about one particular +// ResourceClaim with "WaitForFirstConsumer" allocation mode. +type ResourceClaimSchedulingStatus struct { + // Name matches the pod.spec.resourceClaims[*].Name field. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // UnsuitableNodes lists nodes that the ResourceClaim cannot be + // allocated for. + // + // The size of this field is limited to 128, the same as for + // PodSchedulingSpec.PotentialNodes. This may get increased in the + // future, but not reduced. + // + // +optional + // +listType=atomic + UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` +} + +// PodSchedulingNodeListMaxSize defines the maximum number of entries in the +// node lists that are stored in PodSchedulingContext objects. This limit is part +// of the API. +const PodSchedulingNodeListMaxSize = 128 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// PodSchedulingContextList is a collection of Pod scheduling objects. +type PodSchedulingContextList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of PodSchedulingContext objects. + Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type DeviceClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +type DeviceClassSpec struct { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"` + + // Config defines configuration parameters that apply to each device that is claimed via this class. 
+ // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + + // Only nodes matching the selector will be considered by the scheduler + // when trying to find a Node that fits a Pod when that Pod uses + // a claim that has not been allocated yet *and* that claim + // gets allocated through a control plane controller. It is ignored + // when the claim does not use a control plane controller + // for allocation. + // + // Setting this field is optional. If unset, all Nodes are candidates. + // + // This is an alpha field and requires enabling the DRAControlPlaneController + // feature gate. + // + // +optional + // +featureGate=DRAControlPlaneController + SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` +} + +// DeviceClassConfiguration is used in DeviceClass. +type DeviceClassConfiguration struct { + DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// DeviceClassList is a collection of classes. +type DeviceClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource classes. + Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaimTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +type ResourceClaimTemplateSpec struct { + // ObjectMeta may contain labels and annotations that will be copied into the PVC + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.26 + +// ResourceClaimTemplateList is a collection of claim templates. 
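// Illustrative sketch, not part of the vendored file: a DeviceClass, as defined
// above, that pre-selects devices of one driver and attaches opaque,
// driver-defined configuration which claims referencing the class inherit.
// The driver domain and the parameter payload are hypothetical placeholders.
func exampleDeviceClass() DeviceClass {
	return DeviceClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu.example.com"},
		Spec: DeviceClassSpec{
			// Every device claimed via this class must satisfy this selector.
			Selectors: []DeviceSelector{{
				CEL: &CELDeviceSelector{Expression: `device.driver == "gpu.example.com"`},
			}},
			// Opaque parameters are passed through to the named driver without validation by Kubernetes.
			Config: []DeviceClassConfiguration{{
				DeviceConfiguration: DeviceConfiguration{
					Opaque: &OpaqueDeviceConfiguration{
						Driver:     "gpu.example.com",
						Parameters: runtime.RawExtension{Raw: []byte(`{"apiVersion":"gpu.example.com/v1","kind":"GpuConfig","mode":"shared"}`)},
					},
				},
			}},
		},
	}
}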
+type ResourceClaimTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claim templates. + Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go new file mode 100644 index 000000000..1a44a971d --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -0,0 +1,404 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllocationResult = map[string]string{ + "": "AllocationResult contains attributes of an allocated resource.", + "devices": "Devices is the result of allocating devices.", + "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", + "controller": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (AllocationResult) SwaggerDoc() map[string]string { + return map_AllocationResult +} + +var map_BasicDevice = map[string]string{ + "": "BasicDevice defines one device instance.", + "attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", +} + +func (BasicDevice) SwaggerDoc() map[string]string { + return map_BasicDevice +} + +var map_CELDeviceSelector = map[string]string{ + "": "CELDeviceSelector contains a CEL expression for selecting a device.", + "expression": "Expression is a CEL expression which evaluates a single device. 
It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)", +} + +func (CELDeviceSelector) SwaggerDoc() map[string]string { + return map_CELDeviceSelector +} + +var map_Device = map[string]string{ + "": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.", + "name": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "basic": "Basic defines one device instance.", +} + +func (Device) SwaggerDoc() map[string]string { + return map_Device +} + +var map_DeviceAllocationConfiguration = map[string]string{ + "": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "source": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.", +} + +func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string { + return map_DeviceAllocationConfiguration +} + +var map_DeviceAllocationResult = map[string]string{ + "": "DeviceAllocationResult is the result of allocating devices.", + "results": "Results lists all allocated devices.", + "config": "This field is a combination of all the claim and class configuration parameters. 
Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.", +} + +func (DeviceAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceAllocationResult +} + +var map_DeviceAttribute = map[string]string{ + "": "DeviceAttribute must have exactly one field set.", + "int": "IntValue is a number.", + "bool": "BoolValue is a true/false value.", + "string": "StringValue is a string. Must not be longer than 64 characters.", + "version": "VersionValue is a semantic version according to semver.org spec 2.0.0. Must not be longer than 64 characters.", +} + +func (DeviceAttribute) SwaggerDoc() map[string]string { + return map_DeviceAttribute +} + +var map_DeviceClaim = map[string]string{ + "": "DeviceClaim defines how to request devices with a ResourceClaim.", + "requests": "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.", + "constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.", + "config": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", +} + +func (DeviceClaim) SwaggerDoc() map[string]string { + return map_DeviceClaim +} + +var map_DeviceClaimConfiguration = map[string]string{ + "": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", +} + +func (DeviceClaimConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClaimConfiguration +} + +var map_DeviceClass = map[string]string{ + "": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. 
Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (DeviceClass) SwaggerDoc() map[string]string { + return map_DeviceClass +} + +var map_DeviceClassConfiguration = map[string]string{ + "": "DeviceClassConfiguration is used in DeviceClass.", +} + +func (DeviceClassConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClassConfiguration +} + +var map_DeviceClassList = map[string]string{ + "": "DeviceClassList is a collection of classes.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource classes.", +} + +func (DeviceClassList) SwaggerDoc() map[string]string { + return map_DeviceClassList +} + +var map_DeviceClassSpec = map[string]string{ + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", + "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (DeviceClassSpec) SwaggerDoc() map[string]string { + return map_DeviceClassSpec +} + +var map_DeviceConfiguration = map[string]string{ + "": "DeviceConfiguration must have exactly one field set. It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.", + "opaque": "Opaque provides driver-specific configuration parameters.", +} + +func (DeviceConfiguration) SwaggerDoc() map[string]string { + return map_DeviceConfiguration +} + +var map_DeviceConstraint = map[string]string{ + "": "DeviceConstraint must have exactly one field set besides Requests.", + "requests": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.", + "matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. 
All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", +} + +func (DeviceConstraint) SwaggerDoc() map[string]string { + return map_DeviceConstraint +} + +var map_DeviceRequest = map[string]string{ + "": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", + "name": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", + "deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", + "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", + "count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", + "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.", +} + +func (DeviceRequest) SwaggerDoc() map[string]string { + return map_DeviceRequest +} + +var map_DeviceRequestAllocationResult = map[string]string{ + "": "DeviceRequestAllocationResult contains the allocation result for one request.", + "request": "Request is the name of the request in the claim which caused this device to be allocated. 
Multiple devices may have been allocated per request.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", +} + +func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceRequestAllocationResult +} + +var map_DeviceSelector = map[string]string{ + "": "DeviceSelector must have exactly one field set.", + "cel": "CEL contains a CEL expression for selecting a device.", +} + +func (DeviceSelector) SwaggerDoc() map[string]string { + return map_DeviceSelector +} + +var map_OpaqueDeviceConfiguration = map[string]string{ + "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.", +} + +func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { + return map_OpaqueDeviceConfiguration +} + +var map_PodSchedulingContext = map[string]string{ + "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes where resources for the Pod are needed.", + "status": "Status describes where resources for the Pod can be allocated.", +} + +func (PodSchedulingContext) SwaggerDoc() map[string]string { + return map_PodSchedulingContext +} + +var map_PodSchedulingContextList = map[string]string{ + "": "PodSchedulingContextList is a collection of Pod scheduling objects.", + "metadata": "Standard list metadata", + "items": "Items is the list of PodSchedulingContext objects.", +} + +func (PodSchedulingContextList) SwaggerDoc() map[string]string { + return map_PodSchedulingContextList +} + +var map_PodSchedulingContextSpec = map[string]string{ + "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", + "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", + "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources.
This may get increased in the future, but not reduced.", +} + +func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { + return map_PodSchedulingContextSpec +} + +var map_PodSchedulingContextStatus = map[string]string{ + "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", + "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", +} + +func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { + return map_PodSchedulingContextStatus +} + +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes what is being requested and how to configure it. The spec is immutable.", + "status": "Status describes whether the claim is ready to use and what has been allocated.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + +var map_ResourceClaimConsumerReference = map[string]string{ + "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "resource": "Resource is the type of resource being referenced, for example \"pods\".", + "name": "Name is the name of resource being referenced.", + "uid": "UID identifies exactly one incarnation of the resource.", +} + +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { + return map_ResourceClaimConsumerReference +} + +var map_ResourceClaimList = map[string]string{ + "": "ResourceClaimList is a collection of claims.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claims.", +} + +func (ResourceClaimList) SwaggerDoc() map[string]string { + return map_ResourceClaimList +} + +var map_ResourceClaimSchedulingStatus = map[string]string{ + "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", + "name": "Name matches the pod.spec.resourceClaims[*].Name field.", + "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", +} + +func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimSchedulingStatus +} + +var map_ResourceClaimSpec = map[string]string{ + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", + "controller": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. 
If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (ResourceClaimSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimSpec +} + +var map_ResourceClaimStatus = map[string]string{ + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "deallocationRequested": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", +} + +func (ResourceClaimStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimStatus +} + +var map_ResourceClaimTemplate = map[string]string{ + "": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", +} + +func (ResourceClaimTemplate) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplate +} + +var map_ResourceClaimTemplateList = map[string]string{ + "": "ResourceClaimTemplateList is a collection of claim templates.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claim templates.", +} + +func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateList +} + +var map_ResourceClaimTemplateSpec = map[string]string{ + "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. 
The same fields as in a ResourceClaim are also valid here.", +} + +func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateSpec +} + +var map_ResourcePool = map[string]string{ + "": "ResourcePool describes the pool that ResourceSlices belong to.", + "name": "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.", + "generation": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.", + "resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.", +} + +func (ResourcePool) SwaggerDoc() map[string]string { + return map_ResourcePool +} + +var map_ResourceSlice = map[string]string{ + "": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple <driver name>, <pool name>, <device name>.\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set.
Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (ResourceSlice) SwaggerDoc() map[string]string { + return map_ResourceSlice +} + +var map_ResourceSliceList = map[string]string{ + "": "ResourceSliceList is a collection of ResourceSlices.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource ResourceSlices.", +} + +func (ResourceSliceList) SwaggerDoc() map[string]string { + return map_ResourceSliceList +} + +var map_ResourceSliceSpec = map[string]string{ + "": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", + "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", + "pool": "Pool describes the pool that this ResourceSlice belongs to.", + "nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. This field is immutable.", + "nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", +} + +func (ResourceSliceSpec) SwaggerDoc() map[string]string { + return map_ResourceSliceSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 000000000..58171df1f --- /dev/null +++ b/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,927 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
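Editor's note: the ResourcePool and ResourceSlice documentation vendored above describes a generation-plus-ResourceSliceCount protocol that consumers use to decide whether their view of a pool is complete. The sketch below is an illustrative example of that check, not part of the vendored files; it assumes the caller has already collected the ResourceSlices of one pool (same driver and pool name), for example from a client-go informer cache.

```go
// Illustrative sketch only; not part of the vendored diff.
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

// latestGeneration keeps only the slices published at the highest pool
// generation and reports whether that view of the pool is complete,
// i.e. whether all Spec.Pool.ResourceSliceCount slices have been seen.
func latestGeneration(slices []resourcev1alpha3.ResourceSlice) ([]resourcev1alpha3.ResourceSlice, bool) {
	var newest []resourcev1alpha3.ResourceSlice
	gen := int64(-1)
	for _, s := range slices {
		switch {
		case s.Spec.Pool.Generation > gen:
			// A newer generation invalidates everything collected so far.
			gen = s.Spec.Pool.Generation
			newest = []resourcev1alpha3.ResourceSlice{s}
		case s.Spec.Pool.Generation == gen:
			newest = append(newest, s)
		}
	}
	if len(newest) == 0 {
		return nil, false
	}
	complete := int64(len(newest)) == newest[0].Spec.Pool.ResourceSliceCount
	return newest, complete
}

func main() {
	slices, ok := latestGeneration(nil)
	fmt.Println(len(slices), ok) // 0 false for an empty input
}
```

A consumer caching slices from a watch would typically run a check like this before trusting a pool's device list, per the doc strings above.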
+ +package v1alpha3 + +import ( + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. +func (in *AllocationResult) DeepCopy() *AllocationResult { + if in == nil { + return nil + } + out := new(AllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicDevice) DeepCopyInto(out *BasicDevice) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[QualifiedName]DeviceAttribute, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(map[QualifiedName]resource.Quantity, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice. +func (in *BasicDevice) DeepCopy() *BasicDevice { + if in == nil { + return nil + } + out := new(BasicDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector. +func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector { + if in == nil { + return nil + } + out := new(CELDeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Device) DeepCopyInto(out *Device) { + *out = *in + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicDevice) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration. +func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration { + if in == nil { + return nil + } + out := new(DeviceAllocationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) { + *out = *in + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]DeviceRequestAllocationResult, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceAllocationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult. +func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult { + if in == nil { + return nil + } + out := new(DeviceAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) { + *out = *in + if in.IntValue != nil { + in, out := &in.IntValue, &out.IntValue + *out = new(int64) + **out = **in + } + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(bool) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } + if in.VersionValue != nil { + in, out := &in.VersionValue, &out.VersionValue + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute. +func (in *DeviceAttribute) DeepCopy() *DeviceAttribute { + if in == nil { + return nil + } + out := new(DeviceAttribute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]DeviceRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]DeviceConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClaimConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim. +func (in *DeviceClaim) DeepCopy() *DeviceClaim { + if in == nil { + return nil + } + out := new(DeviceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration. +func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration { + if in == nil { + return nil + } + out := new(DeviceClaimConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceClass) DeepCopyInto(out *DeviceClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass. +func (in *DeviceClass) DeepCopy() *DeviceClass { + if in == nil { + return nil + } + out := new(DeviceClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) { + *out = *in + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration. +func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration { + if in == nil { + return nil + } + out := new(DeviceClassConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeviceClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList. +func (in *DeviceClassList) DeepCopy() *DeviceClassList { + if in == nil { + return nil + } + out := new(DeviceClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClassConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SuitableNodes != nil { + in, out := &in.SuitableNodes, &out.SuitableNodes + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec. +func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec { + if in == nil { + return nil + } + out := new(DeviceClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) { + *out = *in + if in.Opaque != nil { + in, out := &in.Opaque, &out.Opaque + *out = new(OpaqueDeviceConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration. +func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration { + if in == nil { + return nil + } + out := new(DeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MatchAttribute != nil { + in, out := &in.MatchAttribute, &out.MatchAttribute + *out = new(FullyQualifiedName) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint. +func (in *DeviceConstraint) DeepCopy() *DeviceConstraint { + if in == nil { + return nil + } + out := new(DeviceConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest. +func (in *DeviceRequest) DeepCopy() *DeviceRequest { + if in == nil { + return nil + } + out := new(DeviceRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult. +func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult { + if in == nil { + return nil + } + out := new(DeviceRequestAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) { + *out = *in + if in.CEL != nil { + in, out := &in.CEL, &out.CEL + *out = new(CELDeviceSelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector. +func (in *DeviceSelector) DeepCopy() *DeviceSelector { + if in == nil { + return nil + } + out := new(DeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { + *out = *in + in.Parameters.DeepCopyInto(&out.Parameters) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. 
+func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { + if in == nil { + return nil + } + out := new(OpaqueDeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. +func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { + if in == nil { + return nil + } + out := new(PodSchedulingContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSchedulingContext, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. +func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { + if in == nil { + return nil + } + out := new(PodSchedulingContextList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { + *out = *in + if in.PotentialNodes != nil { + in, out := &in.PotentialNodes, &out.PotentialNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. +func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { + if in == nil { + return nil + } + out := new(PodSchedulingContextSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { + *out = *in + if in.ResourceClaims != nil { + in, out := &in.ResourceClaims, &out.ResourceClaims + *out = make([]ResourceClaimSchedulingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. 
+func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { + if in == nil { + return nil + } + out := new(PodSchedulingContextStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. +func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. +func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { + if in == nil { + return nil + } + out := new(ResourceClaimConsumerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. +func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { + if in == nil { + return nil + } + out := new(ResourceClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { + *out = *in + if in.UnsuitableNodes != nil { + in, out := &in.UnsuitableNodes, &out.UnsuitableNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. +func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { + if in == nil { + return nil + } + out := new(ResourceClaimSchedulingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. +func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { + if in == nil { + return nil + } + out := new(ResourceClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { + *out = *in + if in.Allocation != nil { + in, out := &in.Allocation, &out.Allocation + *out = new(AllocationResult) + (*in).DeepCopyInto(*out) + } + if in.ReservedFor != nil { + in, out := &in.ReservedFor, &out.ReservedFor + *out = make([]ResourceClaimConsumerReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. +func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { + if in == nil { + return nil + } + out := new(ResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. +func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { + if in == nil { + return nil + } + out := new(ResourceClaimTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. +func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. 
+func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool. +func (in *ResourcePool) DeepCopy() *ResourcePool { + if in == nil { + return nil + } + out := new(ResourcePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice. +func (in *ResourceSlice) DeepCopy() *ResourceSlice { + if in == nil { + return nil + } + out := new(ResourceSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList. +func (in *ResourceSliceList) DeepCopy() *ResourceSliceList { + if in == nil { + return nil + } + out := new(ResourceSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) { + *out = *in + out.Pool = in.Pool + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec. +func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec { + if in == nil { + return nil + } + out := new(ResourceSliceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go index 76c4da002..ee3c66847 100644 --- a/vendor/k8s.io/api/scheduling/v1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=scheduling.k8s.io package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go index c5ef2f50e..6fef1a937 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto +// source: k8s.io/api/scheduling/v1/generated.proto package v1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_277b2f43b72fffd5, []int{0} + return fileDescriptor_3f12bd05064e996e, []int{0} } func (m *PriorityClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_PriorityClass proto.InternalMessageInfo func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } func (*PriorityClassList) ProtoMessage() {} func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_277b2f43b72fffd5, []int{1} + return fileDescriptor_3f12bd05064e996e, []int{1} } func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,42 +107,41 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) + proto.RegisterFile("k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_3f12bd05064e996e) } -var fileDescriptor_277b2f43b72fffd5 = []byte{ - // 488 bytes of a gzipped FileDescriptorProto +var fileDescriptor_3f12bd05064e996e = []byte{ + // 476 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, - 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, - 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, - 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, - 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, - 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, - 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, - 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, - 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, - 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, - 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, - 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, - 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, - 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, - 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 
0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, - 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, - 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, - 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, - 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, - 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, - 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, - 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, - 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, - 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, - 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, - 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, - 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, - 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, - 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, - 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, + 0x18, 0xc6, 0xe3, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xea, 0xe0, 0x46, 0xbd, 0x81, + 0x2c, 0xd8, 0xf4, 0x04, 0x08, 0xe9, 0x24, 0x86, 0x70, 0x12, 0x42, 0x3a, 0x44, 0x95, 0x81, 0x01, + 0x31, 0xe0, 0xa6, 0x3e, 0xd7, 0x34, 0x89, 0x23, 0xdb, 0xa9, 0xd4, 0x8d, 0x8f, 0xc0, 0x37, 0x62, + 0xed, 0x78, 0xe3, 0x4d, 0x15, 0x0d, 0x1f, 0x81, 0x8d, 0x09, 0x25, 0x2d, 0x97, 0xfe, 0xb9, 0x0a, + 0xb6, 0xbc, 0xef, 0xfb, 0xfc, 0x1e, 0xdb, 0x4f, 0x6c, 0xe8, 0x4f, 0x5f, 0x6a, 0x2c, 0x24, 0xa1, + 0x99, 0x20, 0x3a, 0x9a, 0xb0, 0x71, 0x1e, 0x8b, 0x94, 0x93, 0xd9, 0x80, 0x70, 0x96, 0x32, 0x45, + 0x0d, 0x1b, 0xe3, 0x4c, 0x49, 0x23, 0x1d, 0x77, 0xad, 0xc4, 0x34, 0x13, 0xb8, 0x56, 0xe2, 0xd9, + 0xa0, 0xfb, 0x84, 0x0b, 0x33, 0xc9, 0x47, 0x38, 0x92, 0x09, 0xe1, 0x92, 0x4b, 0x52, 0x01, 0xa3, + 0xfc, 0xaa, 0xaa, 0xaa, 0xa2, 0xfa, 0x5a, 0x1b, 0x75, 0xfb, 0x5b, 0x4b, 0x46, 0x52, 0xb1, 0x3b, + 0x16, 0xeb, 0x3e, 0xab, 0x35, 0x09, 0x8d, 0x26, 0x22, 0x65, 0x6a, 0x4e, 0xb2, 0x29, 0x2f, 0x1b, + 0x9a, 0x24, 0xcc, 0xd0, 0xbb, 0x28, 0x72, 0x8c, 0x52, 0x79, 0x6a, 0x44, 0xc2, 0x0e, 0x80, 0x17, + 0xff, 0x02, 0xca, 0x83, 0x26, 0x74, 0x9f, 0xeb, 0xff, 0x6a, 0xc0, 0xf6, 0x50, 0x09, 0xa9, 0x84, + 0x99, 0xbf, 0x8e, 0xa9, 0xd6, 0xce, 0x67, 0xd8, 0x2c, 0x77, 0x35, 0xa6, 0x86, 0xba, 0xc0, 0x03, + 0x7e, 0xeb, 0xec, 0x29, 0xae, 0x03, 0xbb, 0x35, 0xc7, 0xd9, 0x94, 0x97, 0x0d, 0x8d, 0x4b, 0x35, + 0x9e, 0x0d, 0xf0, 0xfb, 0xd1, 0x17, 0x16, 0x99, 0x77, 0xcc, 0xd0, 0xc0, 0x59, 0x2c, 0x7b, 0x56, + 0xb1, 0xec, 0xc1, 0xba, 0x17, 0xde, 0xba, 0x3a, 0xa7, 0xd0, 0x9e, 0xd1, 0x38, 0x67, 0x6e, 0xc3, + 0x03, 0xbe, 0x1d, 0xb4, 0x37, 0x62, 0xfb, 0x43, 0xd9, 0x0c, 0xd7, 0x33, 0xe7, 0x1c, 0xb6, 0x79, + 0x2c, 0x47, 0x34, 0xbe, 0x60, 0x57, 0x34, 0x8f, 0x8d, 0x7b, 0xe2, 0x01, 0xbf, 0x19, 0x3c, 0xda, + 0x88, 0xdb, 0x6f, 0xb6, 0x87, 0xe1, 0xae, 0xd6, 0x79, 0x0e, 0x5b, 0x63, 0xa6, 0x23, 0x25, 0x32, + 0x23, 0x64, 0xea, 0xde, 0xf3, 0x80, 0x7f, 0x3f, 0x78, 0xb8, 0x41, 0x5b, 0x17, 0xf5, 0x28, 0xdc, + 0xd6, 0x39, 0x1c, 0x76, 0x32, 0xc5, 0x58, 0x52, 0x55, 0x43, 0x19, 0x8b, 0x68, 0xee, 0xda, 0x15, + 0x7b, 0x5e, 0x2c, 0x7b, 0x9d, 
0xe1, 0xde, 0xec, 0xf7, 0xb2, 0x77, 0x7a, 0x78, 0x03, 0xf0, 0xbe, + 0x2c, 0x3c, 0x30, 0xed, 0x7f, 0x07, 0xf0, 0xc1, 0x4e, 0xea, 0x97, 0x42, 0x1b, 0xe7, 0xd3, 0x41, + 0xf2, 0xf8, 0xff, 0x92, 0x2f, 0xe9, 0x2a, 0xf7, 0xce, 0xe6, 0x88, 0xcd, 0xbf, 0x9d, 0xad, 0xd4, + 0x2f, 0xa1, 0x2d, 0x0c, 0x4b, 0xb4, 0xdb, 0xf0, 0x4e, 0xfc, 0xd6, 0xd9, 0x63, 0x7c, 0xec, 0x15, + 0xe0, 0x9d, 0x9d, 0xd5, 0xbf, 0xe7, 0x6d, 0x49, 0x87, 0x6b, 0x93, 0xe0, 0xd5, 0x62, 0x85, 0xac, + 0xeb, 0x15, 0xb2, 0x6e, 0x56, 0xc8, 0xfa, 0x5a, 0x20, 0xb0, 0x28, 0x10, 0xb8, 0x2e, 0x10, 0xb8, + 0x29, 0x10, 0xf8, 0x51, 0x20, 0xf0, 0xed, 0x27, 0xb2, 0x3e, 0xba, 0xc7, 0xde, 0xe4, 0x9f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x9a, 0x3d, 0x5f, 0x2e, 0xae, 0x03, 0x00, 0x00, } func (m *PriorityClass) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto index 1f9a7474a..374e68238 100644 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/scheduling/v1"; // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. @@ -35,9 +35,9 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -54,10 +54,9 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional optional string preemptionPolicy = 5; } @@ -67,7 +66,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go index b4ff34767..019dbcd00 100644 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. 
@@ -34,7 +35,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -51,15 +52,15 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClassList is a collection of priority classes. type PriorityClassList struct { diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go index 7524adf9a..f167e1970 100644 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. 
Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..a4a432a64 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityClass) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 16f3c7cb4..83e504b5a 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
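Editor's note: the new zz_generated.prerelease-lifecycle.go above gives the scheduling/v1 types introspectable lifecycle metadata. The short sketch below is illustrative only and not part of the diff; it shows how a consumer of the vendored package could read that metadata.

```go
// Illustrative only; exercises the generated APILifecycleIntroduced method.
package main

import (
	"fmt"

	schedulingv1 "k8s.io/api/scheduling/v1"
)

func main() {
	var pc schedulingv1.PriorityClass
	major, minor := pc.APILifecycleIntroduced()
	// Per the generated code above, this prints "introduced in 1.14".
	fmt.Printf("scheduling/v1 PriorityClass was introduced in %d.%d\n", major, minor)
}
```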
-// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +// source: k8s.io/api/scheduling/v1alpha1/generated.proto package v1alpha1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_f033641dd0b95dce, []int{0} + return fileDescriptor_260442fbb28d876a, []int{0} } func (m *PriorityClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_PriorityClass proto.InternalMessageInfo func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } func (*PriorityClassList) ProtoMessage() {} func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_f033641dd0b95dce, []int{1} + return fileDescriptor_260442fbb28d876a, []int{1} } func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,42 +107,41 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) + proto.RegisterFile("k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_260442fbb28d876a) } -var fileDescriptor_f033641dd0b95dce = []byte{ - // 494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, - 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, - 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, - 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, - 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, - 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, - 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, - 0xc7, 0x77, 0xcf, 0x3c, 0x61, 0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, - 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, - 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, - 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, - 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, - 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, - 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, - 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, - 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, - 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, - 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, - 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, - 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 
0xcb, - 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, - 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, - 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, - 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, - 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, - 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, - 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, - 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, - 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, +var fileDescriptor_260442fbb28d876a = []byte{ + // 480 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x8f, 0xd3, 0x30, + 0x18, 0x86, 0xeb, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xea, 0xe0, 0x46, 0xbd, 0x25, + 0xcb, 0xd9, 0xf4, 0x04, 0x08, 0xe9, 0xb6, 0x50, 0x09, 0x21, 0x81, 0xa8, 0x32, 0x30, 0x20, 0x06, + 0xdc, 0xd4, 0xe7, 0x9a, 0x26, 0x71, 0x64, 0x3b, 0x95, 0xba, 0xf1, 0x13, 0xf8, 0x53, 0x48, 0x1d, + 0x6f, 0xbc, 0xa9, 0xa2, 0xe1, 0x27, 0xb0, 0x31, 0xa1, 0xa4, 0xbd, 0x4b, 0xdb, 0xc0, 0x71, 0x5b, + 0xbe, 0xef, 0x7b, 0xde, 0xd7, 0xf6, 0x1b, 0x1b, 0xe2, 0xf9, 0x4b, 0x8d, 0x85, 0x24, 0x34, 0x15, + 0x44, 0x87, 0x33, 0x36, 0xcd, 0x22, 0x91, 0x70, 0xb2, 0x18, 0xd2, 0x28, 0x9d, 0xd1, 0x21, 0xe1, + 0x2c, 0x61, 0x8a, 0x1a, 0x36, 0xc5, 0xa9, 0x92, 0x46, 0xda, 0x68, 0xcb, 0x63, 0x9a, 0x0a, 0x5c, + 0xf1, 0xf8, 0x86, 0xef, 0x9d, 0x71, 0x61, 0x66, 0xd9, 0x04, 0x87, 0x32, 0x26, 0x5c, 0x72, 0x49, + 0x4a, 0xd9, 0x24, 0xbb, 0x2c, 0xab, 0xb2, 0x28, 0xbf, 0xb6, 0x76, 0xbd, 0xc1, 0xde, 0xf2, 0xa1, + 0x54, 0x8c, 0x2c, 0x6a, 0x4b, 0xf6, 0x9e, 0x55, 0x4c, 0x4c, 0xc3, 0x99, 0x48, 0x98, 0x5a, 0x92, + 0x74, 0xce, 0x8b, 0x86, 0x26, 0x31, 0x33, 0xf4, 0x6f, 0x2a, 0xf2, 0x2f, 0x95, 0xca, 0x12, 0x23, + 0x62, 0x56, 0x13, 0xbc, 0xf8, 0x9f, 0xa0, 0x38, 0x6e, 0x4c, 0x8f, 0x75, 0x83, 0x5f, 0x4d, 0xd8, + 0x19, 0x2b, 0x21, 0x95, 0x30, 0xcb, 0x57, 0x11, 0xd5, 0xda, 0xfe, 0x0c, 0x5b, 0xc5, 0xae, 0xa6, + 0xd4, 0x50, 0x07, 0xb8, 0xc0, 0x6b, 0x9f, 0x3f, 0xc5, 0x55, 0x6c, 0xb7, 0xe6, 0x38, 0x9d, 0xf3, + 0xa2, 0xa1, 0x71, 0x41, 0xe3, 0xc5, 0x10, 0xbf, 0x9f, 0x7c, 0x61, 0xa1, 0x79, 0xc7, 0x0c, 0xf5, + 0xed, 0xd5, 0xba, 0xdf, 0xc8, 0xd7, 0x7d, 0x58, 0xf5, 0x82, 0x5b, 0x57, 0xfb, 0x14, 0x5a, 0x0b, + 0x1a, 0x65, 0xcc, 0x69, 0xba, 0xc0, 0xb3, 0xfc, 0xce, 0x0e, 0xb6, 0x3e, 0x14, 0xcd, 0x60, 0x3b, + 0xb3, 0x2f, 0x60, 0x87, 0x47, 0x72, 0x42, 0xa3, 0x11, 0xbb, 0xa4, 0x59, 0x64, 0x9c, 0x13, 0x17, + 0x78, 0x2d, 0xff, 0xc9, 0x0e, 0xee, 0xbc, 0xde, 0x1f, 0x06, 0x87, 0xac, 0xfd, 0x1c, 0xb6, 0xa7, + 0x4c, 0x87, 0x4a, 0xa4, 0x46, 0xc8, 0xc4, 0x79, 0xe0, 0x02, 0xef, 0xa1, 0xff, 0x78, 0x27, 0x6d, + 0x8f, 0xaa, 0x51, 0xb0, 0xcf, 0xd9, 0x1c, 0x76, 0x53, 0xc5, 0x58, 0x5c, 0x56, 0x63, 0x19, 0x89, + 0x70, 0xe9, 0x58, 0xa5, 0xf6, 0x22, 0x5f, 0xf7, 0xbb, 0xe3, 0xa3, 0xd9, 0xef, 0x75, 0xff, 0xb4, + 0x7e, 0x03, 0xf0, 0x31, 0x16, 0xd4, 0x4c, 0x07, 0xdf, 0x01, 0x7c, 0x74, 0x90, 0xfa, 0x5b, 0xa1, + 0x8d, 0xfd, 0xa9, 0x96, 0x3c, 0xbe, 0x5f, 0xf2, 0x85, 0xba, 0xcc, 0xbd, 0xbb, 0x3b, 0x62, 0xeb, + 0xa6, 0xb3, 0x97, 0x7a, 0x00, 0x2d, 0x61, 0x58, 0xac, 0x9d, 0xa6, 0x7b, 0xe2, 0xb5, 0xcf, 0xcf, + 0xf0, 0xdd, 0x6f, 0x01, 0x1f, 
0xec, 0xaf, 0xfa, 0x49, 0x6f, 0x0a, 0x8f, 0x60, 0x6b, 0xe5, 0x8f, + 0x56, 0x1b, 0xd4, 0xb8, 0xda, 0xa0, 0xc6, 0xf5, 0x06, 0x35, 0xbe, 0xe6, 0x08, 0xac, 0x72, 0x04, + 0xae, 0x72, 0x04, 0xae, 0x73, 0x04, 0x7e, 0xe4, 0x08, 0x7c, 0xfb, 0x89, 0x1a, 0x1f, 0xd1, 0xdd, + 0xaf, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xfe, 0x45, 0x7e, 0xc6, 0x03, 0x00, 0x00, } func (m *PriorityClass) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index da27a13e7..e42dccc68 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "k8s.io/api/scheduling/v1alpha1"; // DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority @@ -36,9 +36,9 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,10 +55,9 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional optional string preemptionPolicy = 5; } @@ -68,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 4d8462c7c..26ba8ff5d 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -35,7 +35,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. 
Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -52,10 +52,9 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index 940c39a08..557005db6 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index 64b1c1505..68e8e90d1 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +// source: k8s.io/api/scheduling/v1beta1/generated.proto package v1beta1 @@ -48,7 +48,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PriorityClass) Reset() { *m = PriorityClass{} } func (*PriorityClass) ProtoMessage() {} func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd406dede2d3f42, []int{0} + return fileDescriptor_9edc3acf997efcf2, []int{0} } func (m *PriorityClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +76,7 @@ var xxx_messageInfo_PriorityClass proto.InternalMessageInfo func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } func (*PriorityClassList) ProtoMessage() {} func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd406dede2d3f42, []int{1} + return fileDescriptor_9edc3acf997efcf2, []int{1} } func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,42 +107,42 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) + proto.RegisterFile("k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_9edc3acf997efcf2) } -var fileDescriptor_6cd406dede2d3f42 = []byte{ - // 494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 0x7a, 0x4b, - 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, - 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, - 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, - 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, - 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, - 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, - 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, - 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, - 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, - 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, - 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, - 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, - 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, - 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, - 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, - 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, - 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, - 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, - 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, - 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, - 
0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, - 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, - 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, - 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, - 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, - 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, - 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, - 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, - 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, +var fileDescriptor_9edc3acf997efcf2 = []byte{ + // 481 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x8f, 0xd3, 0x30, + 0x18, 0x86, 0xe3, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xaa, 0x74, 0x69, 0xd4, 0x5b, + 0x32, 0x70, 0x36, 0x3d, 0x01, 0x42, 0xba, 0x2d, 0x77, 0x12, 0x42, 0x02, 0x51, 0x32, 0x30, 0x20, + 0x06, 0x9c, 0xd4, 0x97, 0x9a, 0x26, 0x71, 0x64, 0x3b, 0x95, 0xba, 0xf1, 0x13, 0xf8, 0x51, 0x0c, + 0x1d, 0x6f, 0xbc, 0xa9, 0xa2, 0xe1, 0x27, 0xb0, 0x31, 0xa1, 0xa4, 0xe1, 0xd2, 0x36, 0x50, 0x6e, + 0xcb, 0xf7, 0x7d, 0xcf, 0xfb, 0xda, 0x7e, 0x63, 0xc3, 0xd3, 0xd9, 0x0b, 0x89, 0x18, 0xc7, 0x24, + 0x65, 0x58, 0x06, 0x53, 0x3a, 0xc9, 0x22, 0x96, 0x84, 0x78, 0x3e, 0xf2, 0xa9, 0x22, 0x23, 0x1c, + 0xd2, 0x84, 0x0a, 0xa2, 0xe8, 0x04, 0xa5, 0x82, 0x2b, 0x6e, 0x1c, 0x6f, 0x70, 0x44, 0x52, 0x86, + 0x6a, 0x1c, 0x55, 0x78, 0xff, 0x34, 0x64, 0x6a, 0x9a, 0xf9, 0x28, 0xe0, 0x31, 0x0e, 0x79, 0xc8, + 0x71, 0xa9, 0xf2, 0xb3, 0xab, 0xb2, 0x2a, 0x8b, 0xf2, 0x6b, 0xe3, 0xd6, 0x1f, 0x6e, 0x2d, 0x1e, + 0x70, 0x41, 0xf1, 0xbc, 0xb1, 0x62, 0xff, 0x69, 0xcd, 0xc4, 0x24, 0x98, 0xb2, 0x84, 0x8a, 0x05, + 0x4e, 0x67, 0x61, 0xd1, 0x90, 0x38, 0xa6, 0x8a, 0xfc, 0x4d, 0x85, 0xff, 0xa5, 0x12, 0x59, 0xa2, + 0x58, 0x4c, 0x1b, 0x82, 0xe7, 0xff, 0x13, 0x14, 0xa7, 0x8d, 0xc9, 0xbe, 0x6e, 0xf8, 0xb3, 0x05, + 0xbb, 0x63, 0xc1, 0xb8, 0x60, 0x6a, 0x71, 0x11, 0x11, 0x29, 0x8d, 0x4f, 0xb0, 0x5d, 0xec, 0x6a, + 0x42, 0x14, 0x31, 0x81, 0x0d, 0x9c, 0xce, 0xd9, 0x13, 0x54, 0xa7, 0x76, 0x6b, 0x8e, 0xd2, 0x59, + 0x58, 0x34, 0x24, 0x2a, 0x68, 0x34, 0x1f, 0xa1, 0xb7, 0xfe, 0x67, 0x1a, 0xa8, 0x37, 0x54, 0x11, + 0xd7, 0x58, 0xae, 0x06, 0x5a, 0xbe, 0x1a, 0xc0, 0xba, 0xe7, 0xdd, 0xba, 0x1a, 0x27, 0x50, 0x9f, + 0x93, 0x28, 0xa3, 0x66, 0xcb, 0x06, 0x8e, 0xee, 0x76, 0x2b, 0x58, 0x7f, 0x5f, 0x34, 0xbd, 0xcd, + 0xcc, 0x38, 0x87, 0xdd, 0x30, 0xe2, 0x3e, 0x89, 0x2e, 0xe9, 0x15, 0xc9, 0x22, 0x65, 0x1e, 0xd9, + 0xc0, 0x69, 0xbb, 0x8f, 0x2a, 0xb8, 0xfb, 0x72, 0x7b, 0xe8, 0xed, 0xb2, 0xc6, 0x33, 0xd8, 0x99, + 0x50, 0x19, 0x08, 0x96, 0x2a, 0xc6, 0x13, 0xf3, 0x9e, 0x0d, 0x9c, 0xfb, 0xee, 0xc3, 0x4a, 0xda, + 0xb9, 0xac, 0x47, 0xde, 0x36, 0x67, 0x84, 0xb0, 0x97, 0x0a, 0x4a, 0xe3, 0xb2, 0x1a, 0xf3, 0x88, + 0x05, 0x0b, 0x53, 0x2f, 0xb5, 0xe7, 0xf9, 0x6a, 0xd0, 0x1b, 0xef, 0xcd, 0x7e, 0xad, 0x06, 0x27, + 0xcd, 0x1b, 0x80, 0xf6, 0x31, 0xaf, 0x61, 0x3a, 0xfc, 0x06, 0xe0, 0x83, 0x9d, 0xd4, 0x5f, 0x33, + 0xa9, 0x8c, 0x8f, 0x8d, 0xe4, 0xd1, 0xdd, 0x92, 0x2f, 0xd4, 0x65, 0xee, 0xbd, 0xea, 0x88, 0xed, + 0x3f, 0x9d, 0xad, 0xd4, 0xdf, 0x41, 0x9d, 0x29, 0x1a, 0x4b, 0xb3, 0x65, 0x1f, 0x39, 0x9d, 0xb3, + 0xc7, 0xe8, 0xe0, 0x53, 0x40, 0x3b, 0xdb, 
0xab, 0xff, 0xd1, 0xab, 0xc2, 0xc2, 0xdb, 0x38, 0xb9, + 0x17, 0xcb, 0xb5, 0xa5, 0x5d, 0xaf, 0x2d, 0xed, 0x66, 0x6d, 0x69, 0x5f, 0x72, 0x0b, 0x2c, 0x73, + 0x0b, 0x5c, 0xe7, 0x16, 0xb8, 0xc9, 0x2d, 0xf0, 0x3d, 0xb7, 0xc0, 0xd7, 0x1f, 0x96, 0xf6, 0xe1, + 0xf8, 0xe0, 0x13, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x04, 0x2e, 0xb0, 0xce, 0xc2, 0x03, 0x00, + 0x00, } func (m *PriorityClass) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 99bdaabee..7f77b0175 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/scheduling/v1beta1"; // DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority @@ -36,9 +36,9 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. optional int32 value = 2; @@ -55,10 +55,9 @@ message PriorityClass { // +optional optional string description = 4; - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional optional string preemptionPolicy = 5; } @@ -68,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index d68b4b318..6f88592cf 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -39,7 +39,7 @@ type PriorityClass struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // The value of this priority class. This is the actual priority that pods + // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. 
Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` @@ -56,10 +56,9 @@ type PriorityClass struct { // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. + // preemptionPolicy is the Policy for preempting pods with lower priority. // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. - // This field is beta-level, gated by the NonPreemptingPriority feature-gate. // +optional PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index d576c8401..f42008eb9 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "value": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.", + "preemptionPolicy": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", } func (PriorityClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 75a6489da..e2310dac2 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. 
// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/storage/v1" diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 34a3c34dc..11c8c97c2 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto +// source: k8s.io/api/storage/v1/generated.proto package v1 @@ -28,6 +28,8 @@ import ( github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -49,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CSIDriver) Reset() { *m = CSIDriver{} } func (*CSIDriver) ProtoMessage() {} func (*CSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{0} + return fileDescriptor_662262cc70094b41, []int{0} } func (m *CSIDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,7 +79,7 @@ var xxx_messageInfo_CSIDriver proto.InternalMessageInfo func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } func (*CSIDriverList) ProtoMessage() {} func (*CSIDriverList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{1} + return fileDescriptor_662262cc70094b41, []int{1} } func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -105,7 +107,7 @@ var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } func (*CSIDriverSpec) ProtoMessage() {} func (*CSIDriverSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{2} + return fileDescriptor_662262cc70094b41, []int{2} } func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,7 +135,7 @@ var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo func (m *CSINode) Reset() { *m = CSINode{} } func (*CSINode) ProtoMessage() {} func (*CSINode) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{3} + return fileDescriptor_662262cc70094b41, []int{3} } func (m *CSINode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -161,7 +163,7 @@ var xxx_messageInfo_CSINode proto.InternalMessageInfo func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } func (*CSINodeDriver) ProtoMessage() {} func (*CSINodeDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{4} + return fileDescriptor_662262cc70094b41, []int{4} } func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -189,7 +191,7 @@ var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo func (m *CSINodeList) Reset() { *m = CSINodeList{} } func (*CSINodeList) ProtoMessage() {} func (*CSINodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{5} + return fileDescriptor_662262cc70094b41, []int{5} } func (m *CSINodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -217,7 +219,7 @@ var xxx_messageInfo_CSINodeList proto.InternalMessageInfo func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } func (*CSINodeSpec) ProtoMessage() {} func (*CSINodeSpec) 
Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{6} + return fileDescriptor_662262cc70094b41, []int{6} } func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,10 +244,66 @@ func (m *CSINodeSpec) XXX_DiscardUnknown() { var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo +func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} } +func (*CSIStorageCapacity) ProtoMessage() {} +func (*CSIStorageCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_662262cc70094b41, []int{7} +} +func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIStorageCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIStorageCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIStorageCapacity.Merge(m, src) +} +func (m *CSIStorageCapacity) XXX_Size() int { + return m.Size() +} +func (m *CSIStorageCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_CSIStorageCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo + +func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} } +func (*CSIStorageCapacityList) ProtoMessage() {} +func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) { + return fileDescriptor_662262cc70094b41, []int{8} +} +func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIStorageCapacityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIStorageCapacityList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIStorageCapacityList.Merge(m, src) +} +func (m *CSIStorageCapacityList) XXX_Size() int { + return m.Size() +} +func (m *CSIStorageCapacityList) XXX_DiscardUnknown() { + xxx_messageInfo_CSIStorageCapacityList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo + func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{7} + return fileDescriptor_662262cc70094b41, []int{9} } func (m *StorageClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +331,7 @@ var xxx_messageInfo_StorageClass proto.InternalMessageInfo func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{8} + return fileDescriptor_662262cc70094b41, []int{10} } func (m *StorageClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +359,7 @@ var xxx_messageInfo_StorageClassList proto.InternalMessageInfo func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{9} + return fileDescriptor_662262cc70094b41, []int{11} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +387,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() 
([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{10} + return fileDescriptor_662262cc70094b41, []int{12} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +415,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{11} + return fileDescriptor_662262cc70094b41, []int{13} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -385,7 +443,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{12} + return fileDescriptor_662262cc70094b41, []int{14} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +471,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{13} + return fileDescriptor_662262cc70094b41, []int{15} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +499,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{14} + return fileDescriptor_662262cc70094b41, []int{16} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +527,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{15} + return fileDescriptor_662262cc70094b41, []int{17} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +555,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{16} + return fileDescriptor_662262cc70094b41, []int{18} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -530,6 +588,8 @@ func init() { proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver") proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList") proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1.CSINodeSpec") + proto.RegisterType((*CSIStorageCapacity)(nil), "k8s.io.api.storage.v1.CSIStorageCapacity") + proto.RegisterType((*CSIStorageCapacityList)(nil), "k8s.io.api.storage.v1.CSIStorageCapacityList") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") 
proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") @@ -545,105 +605,115 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) -} - -var fileDescriptor_3b530c1983504d8d = []byte{ - // 1500 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0xce, 0xaf, 0x71, 0xd2, 0x24, 0x93, 0xf4, 0xfb, 0x35, 0x39, 0xd8, 0xd1, 0xb6, - 0x82, 0x50, 0xe8, 0xba, 0x29, 0xa5, 0xaa, 0x2a, 0x15, 0x29, 0x9b, 0xb8, 0x34, 0x22, 0xbf, 0x34, - 0x0e, 0x15, 0x42, 0x80, 0x3a, 0xd9, 0x9d, 0x38, 0x5b, 0x7b, 0x77, 0xb6, 0x3b, 0x63, 0x53, 0xdf, - 0xe0, 0xc2, 0x0d, 0x09, 0xae, 0x88, 0x3f, 0x02, 0x24, 0xb8, 0x70, 0xe4, 0x54, 0x6e, 0x15, 0xa7, - 0x9e, 0x2c, 0x6a, 0xce, 0xf0, 0x07, 0xe4, 0x84, 0x66, 0x76, 0xec, 0xfd, 0xe1, 0x75, 0x9a, 0x5e, - 0x72, 0xf3, 0xbe, 0x1f, 0x9f, 0xf7, 0x66, 0xde, 0x7b, 0x9f, 0x37, 0x06, 0x1f, 0x34, 0xee, 0x30, - 0xc3, 0xa1, 0x95, 0x46, 0xeb, 0x88, 0x04, 0x1e, 0xe1, 0x84, 0x55, 0xda, 0xc4, 0xb3, 0x69, 0x50, - 0x51, 0x0a, 0xec, 0x3b, 0x15, 0xc6, 0x69, 0x80, 0xeb, 0xa4, 0xd2, 0x5e, 0xaf, 0xd4, 0x89, 0x47, - 0x02, 0xcc, 0x89, 0x6d, 0xf8, 0x01, 0xe5, 0x14, 0x5e, 0x0e, 0xcd, 0x0c, 0xec, 0x3b, 0x86, 0x32, - 0x33, 0xda, 0xeb, 0x2b, 0xd7, 0xeb, 0x0e, 0x3f, 0x69, 0x1d, 0x19, 0x16, 0x75, 0x2b, 0x75, 0x5a, - 0xa7, 0x15, 0x69, 0x7d, 0xd4, 0x3a, 0x96, 0x5f, 0xf2, 0x43, 0xfe, 0x0a, 0x51, 0x56, 0xf4, 0x58, - 0x30, 0x8b, 0x06, 0x59, 0x91, 0x56, 0x6e, 0x45, 0x36, 0x2e, 0xb6, 0x4e, 0x1c, 0x8f, 0x04, 0x9d, - 0x8a, 0xdf, 0xa8, 0x0b, 0x01, 0xab, 0xb8, 0x84, 0xe3, 0x2c, 0xaf, 0xca, 0x28, 0xaf, 0xa0, 0xe5, - 0x71, 0xc7, 0x25, 0x43, 0x0e, 0xb7, 0x5f, 0xe5, 0xc0, 0xac, 0x13, 0xe2, 0xe2, 0xb4, 0x9f, 0xfe, - 0xab, 0x06, 0x66, 0x36, 0x6b, 0xdb, 0x5b, 0x81, 0xd3, 0x26, 0x01, 0x7c, 0x04, 0xa6, 0x45, 0x46, - 0x36, 0xe6, 0xb8, 0xa8, 0xad, 0x6a, 0x6b, 0x85, 0x9b, 0x37, 0x8c, 0xe8, 0xa6, 0x06, 0xc0, 0x86, - 0xdf, 0xa8, 0x0b, 0x01, 0x33, 0x84, 0xb5, 0xd1, 0x5e, 0x37, 0xf6, 0x8f, 0x1e, 0x13, 0x8b, 0xef, - 0x12, 0x8e, 0x4d, 0xf8, 0xac, 0x5b, 0x1e, 0xeb, 0x75, 0xcb, 0x20, 0x92, 0xa1, 0x01, 0x2a, 0xbc, - 0x0f, 0xf2, 0xcc, 0x27, 0x56, 0x71, 0x5c, 0xa2, 0x5f, 0x35, 0x32, 0xeb, 0x60, 0x0c, 0x32, 0xaa, - 0xf9, 0xc4, 0x32, 0x67, 0x15, 0x62, 0x5e, 0x7c, 0x21, 0xe9, 0xaf, 0xff, 0xa2, 0x81, 0xb9, 0x81, - 0xd5, 0x8e, 0xc3, 0x38, 0xfc, 0x6c, 0x28, 0x77, 0xe3, 0x7c, 0xb9, 0x0b, 0x6f, 0x99, 0xf9, 0x82, - 0x8a, 0x33, 0xdd, 0x97, 0xc4, 0xf2, 0xae, 0x82, 0x09, 0x87, 0x13, 0x97, 0x15, 0xc7, 0x57, 0x73, - 0x6b, 0x85, 0x9b, 0xab, 0xaf, 0x4a, 0xdc, 0x9c, 0x53, 0x60, 0x13, 0xdb, 0xc2, 0x0d, 0x85, 0xde, - 0xfa, 0x8f, 0xf9, 0x58, 0xda, 0xe2, 0x38, 0xf0, 0x2e, 0xb8, 0x84, 0x39, 0xc7, 0xd6, 0x09, 0x22, - 0x4f, 0x5a, 0x4e, 0x40, 0x6c, 0x99, 0xfc, 0xb4, 0x09, 0x7b, 0xdd, 0xf2, 0xa5, 0x8d, 0x84, 0x06, - 0xa5, 0x2c, 0x85, 0xaf, 0x4f, 0xed, 0x6d, 0xef, 0x98, 0xee, 0x7b, 0xbb, 0xb4, 0xe5, 0x71, 0x79, - 0xad, 0xca, 0xf7, 0x20, 0xa1, 0x41, 0x29, 0x4b, 0x68, 0x81, 0xe5, 0x36, 0x6d, 0xb6, 0x5c, 0xb2, - 0xe3, 0x1c, 0x13, 0xab, 0x63, 0x35, 0xc9, 0x2e, 0xb5, 0x09, 0x2b, 0xe6, 0x56, 0x73, 0x6b, 0x33, - 0x66, 0xa5, 0xd7, 0x2d, 0x2f, 0x3f, 0xcc, 0xd0, 0x9f, 0x76, 0xcb, 0x4b, 0x19, 0x72, 0x94, 0x09, - 0x06, 0xef, 0x81, 0x79, 0x75, 0x39, 0x9b, 0xd8, 0xc7, 0x96, 0xc3, 0x3b, 0xc5, 0xbc, 0xcc, 0x70, - 0xa9, 0xd7, 0x2d, 0xcf, 0xd7, 0x92, 0x2a, 0x94, 0xb6, 0x85, 0x0f, 0xc0, 0xdc, 0x31, 0xfb, 0x30, - 0xa0, 0x2d, 0xff, 0x80, 0x36, 0x1d, 0xab, 0x53, 0x9c, 
0x58, 0xd5, 0xd6, 0x66, 0x4c, 0xbd, 0xd7, - 0x2d, 0xcf, 0xdd, 0xaf, 0xc5, 0x14, 0xa7, 0x69, 0x01, 0x4a, 0x3a, 0xc2, 0x47, 0x60, 0x8e, 0xd3, - 0x06, 0xf1, 0xc4, 0xd5, 0x11, 0xc6, 0x59, 0x71, 0x52, 0x96, 0xf1, 0xca, 0x88, 0x32, 0x1e, 0xc6, - 0x6c, 0xcd, 0xcb, 0xaa, 0x92, 0x73, 0x71, 0x29, 0x43, 0x49, 0x40, 0xb8, 0x09, 0x16, 0x83, 0xb0, - 0x2e, 0x0c, 0x11, 0xbf, 0x75, 0xd4, 0x74, 0xd8, 0x49, 0x71, 0x4a, 0x1e, 0xf6, 0x72, 0xaf, 0x5b, - 0x5e, 0x44, 0x69, 0x25, 0x1a, 0xb6, 0xd7, 0x7f, 0xd6, 0xc0, 0xd4, 0x66, 0x6d, 0x7b, 0x8f, 0xda, - 0xe4, 0x02, 0x66, 0x71, 0x2b, 0x31, 0x8b, 0xfa, 0xe8, 0x96, 0x16, 0xf9, 0x8c, 0x9c, 0xc4, 0x7f, - 0xc3, 0x49, 0x14, 0x36, 0x8a, 0x45, 0x56, 0x41, 0xde, 0xc3, 0x2e, 0x91, 0x59, 0xcf, 0x44, 0x3e, - 0x7b, 0xd8, 0x25, 0x48, 0x6a, 0xe0, 0x9b, 0x60, 0xd2, 0xa3, 0x36, 0xd9, 0xde, 0x92, 0xb1, 0x67, - 0xcc, 0x4b, 0xca, 0x66, 0x72, 0x4f, 0x4a, 0x91, 0xd2, 0xc2, 0x5b, 0x60, 0x96, 0x53, 0x9f, 0x36, - 0x69, 0xbd, 0xf3, 0x11, 0xe9, 0xf4, 0x9b, 0x73, 0xa1, 0xd7, 0x2d, 0xcf, 0x1e, 0xc6, 0xe4, 0x28, - 0x61, 0x05, 0x3f, 0x07, 0x05, 0xdc, 0x6c, 0x52, 0x0b, 0x73, 0x7c, 0xd4, 0x24, 0xb2, 0xe3, 0x0a, - 0x37, 0xaf, 0x8d, 0x38, 0x5e, 0xd8, 0xcc, 0x22, 0x2e, 0x22, 0x8c, 0xb6, 0x02, 0x8b, 0x30, 0x73, - 0xbe, 0xd7, 0x2d, 0x17, 0x36, 0x22, 0x08, 0x14, 0xc7, 0xd3, 0x7f, 0xd2, 0x40, 0x41, 0x1d, 0xf8, - 0x02, 0x88, 0x67, 0x33, 0x49, 0x3c, 0xa5, 0xb3, 0xab, 0x34, 0x82, 0x76, 0xbe, 0x18, 0x64, 0x2c, - 0x39, 0x67, 0x1f, 0x4c, 0xd9, 0xb2, 0x54, 0xac, 0xa8, 0x49, 0xd4, 0xab, 0x67, 0xa3, 0x2a, 0x4a, - 0x9b, 0x57, 0xd8, 0x53, 0xe1, 0x37, 0x43, 0x7d, 0x14, 0xfd, 0xdb, 0x49, 0x30, 0xdb, 0x9f, 0xe6, - 0x26, 0x66, 0xec, 0x02, 0x9a, 0xf7, 0x7d, 0x50, 0xf0, 0x03, 0xda, 0x76, 0x98, 0x43, 0x3d, 0x12, - 0xa8, 0x3e, 0x5a, 0x52, 0x2e, 0x85, 0x83, 0x48, 0x85, 0xe2, 0x76, 0xb0, 0x0e, 0x80, 0x8f, 0x03, - 0xec, 0x12, 0x2e, 0x4e, 0x9f, 0x93, 0xa7, 0x7f, 0x6f, 0xc4, 0xe9, 0xe3, 0x27, 0x32, 0x0e, 0x06, - 0x5e, 0x55, 0x8f, 0x07, 0x9d, 0x28, 0xbb, 0x48, 0x81, 0x62, 0xd0, 0xb0, 0x01, 0xe6, 0x02, 0x62, - 0x35, 0xb1, 0xe3, 0x2a, 0xee, 0xca, 0xcb, 0x0c, 0xab, 0x82, 0x48, 0x50, 0x5c, 0x71, 0xda, 0x2d, - 0xdf, 0x18, 0x7e, 0x47, 0x18, 0x07, 0x24, 0x60, 0x0e, 0xe3, 0xc4, 0xe3, 0x61, 0x87, 0x26, 0x7c, - 0x50, 0x12, 0x5b, 0xcc, 0x89, 0x2b, 0x58, 0x7d, 0xdf, 0xe7, 0x0e, 0xf5, 0x58, 0x71, 0x22, 0x9a, - 0x93, 0xdd, 0x98, 0x1c, 0x25, 0xac, 0xe0, 0x0e, 0x58, 0x16, 0x7d, 0xfd, 0x65, 0x18, 0xa0, 0xfa, - 0xd4, 0xc7, 0x9e, 0xb8, 0xa5, 0xe2, 0xa4, 0x64, 0xad, 0xa2, 0x58, 0x01, 0x1b, 0x19, 0x7a, 0x94, - 0xe9, 0x05, 0x3f, 0x01, 0x8b, 0xe1, 0x0e, 0x30, 0x1d, 0xcf, 0x76, 0xbc, 0xba, 0xd8, 0x00, 0x92, - 0x00, 0x67, 0xcc, 0x6b, 0x82, 0x00, 0x1f, 0xa6, 0x95, 0xa7, 0x59, 0x42, 0x34, 0x0c, 0x02, 0x9f, - 0x80, 0x45, 0x19, 0x91, 0xd8, 0x6a, 0xe8, 0x1d, 0xc2, 0x8a, 0xd3, 0xb2, 0x74, 0x6b, 0xf1, 0xd2, - 0x89, 0xab, 0x0b, 0xd9, 0x3b, 0x24, 0x83, 0x1a, 0x69, 0x12, 0x8b, 0xd3, 0xe0, 0x90, 0x04, 0xae, - 0xf9, 0x86, 0xaa, 0xd7, 0xe2, 0x46, 0x1a, 0x0a, 0x0d, 0xa3, 0xaf, 0xdc, 0x03, 0xf3, 0xa9, 0x82, - 0xc3, 0x05, 0x90, 0x6b, 0x90, 0x4e, 0x48, 0x6a, 0x48, 0xfc, 0x84, 0xcb, 0x60, 0xa2, 0x8d, 0x9b, - 0x2d, 0x12, 0x36, 0x1f, 0x0a, 0x3f, 0xee, 0x8e, 0xdf, 0xd1, 0xf4, 0xdf, 0x34, 0xb0, 0x10, 0xef, - 0x9e, 0x0b, 0xe0, 0x89, 0x07, 0x49, 0x9e, 0xb8, 0x72, 0x8e, 0x9e, 0x1e, 0x41, 0x16, 0x5f, 0x6b, - 0x60, 0x36, 0xbe, 0xea, 0xe0, 0xbb, 0x60, 0x1a, 0xb7, 0x6c, 0x87, 0x78, 0x56, 0x9f, 0xd3, 0x07, - 0x89, 0x6c, 0x28, 0x39, 0x1a, 0x58, 0x88, 0x45, 0x48, 0x9e, 0xfa, 0x4e, 0x80, 0x45, 0x93, 0xd5, - 0x88, 0x45, 0x3d, 0x9b, 0xc9, 0x1b, 0xca, 0x85, 0x8b, 0xb0, 0x9a, 0x56, 0xa2, 
0x61, 0x7b, 0xfd, - 0x87, 0x71, 0xb0, 0x10, 0xf6, 0x46, 0xf8, 0x04, 0x72, 0x89, 0xc7, 0x2f, 0x80, 0x54, 0x76, 0x13, - 0x1b, 0xf1, 0x9d, 0x33, 0x57, 0x46, 0x94, 0xd8, 0xa8, 0xd5, 0x08, 0x3f, 0x06, 0x93, 0x8c, 0x63, - 0xde, 0x12, 0x44, 0x23, 0x00, 0xaf, 0x9f, 0x17, 0x50, 0x3a, 0x45, 0x5b, 0x31, 0xfc, 0x46, 0x0a, - 0x4c, 0xff, 0x5d, 0x03, 0xcb, 0x69, 0x97, 0x0b, 0xe8, 0xb0, 0x9d, 0x64, 0x87, 0xbd, 0x75, 0xce, - 0xc3, 0x8c, 0xe8, 0xb2, 0x3f, 0x35, 0xf0, 0xbf, 0xa1, 0x73, 0xcb, 0xfd, 0x2b, 0x78, 0xc9, 0x4f, - 0xb1, 0xdf, 0x5e, 0xf4, 0x9e, 0x90, 0xbc, 0x74, 0x90, 0xa1, 0x47, 0x99, 0x5e, 0xf0, 0x31, 0x58, - 0x70, 0xbc, 0xa6, 0xe3, 0x91, 0x50, 0x56, 0x8b, 0xea, 0x9b, 0x49, 0x1e, 0x69, 0x64, 0x59, 0xdc, - 0xe5, 0x5e, 0xb7, 0xbc, 0xb0, 0x9d, 0x42, 0x41, 0x43, 0xb8, 0xfa, 0x1f, 0x19, 0x95, 0x91, 0x1b, - 0x57, 0x8c, 0x90, 0x94, 0x90, 0x60, 0x68, 0x84, 0x94, 0x1c, 0x0d, 0x2c, 0x64, 0xdf, 0xc8, 0xab, - 0x50, 0x89, 0x9e, 0xbb, 0x6f, 0xa4, 0x53, 0xac, 0x6f, 0xe4, 0x37, 0x52, 0x60, 0x22, 0x09, 0xf1, - 0xae, 0x92, 0x77, 0x99, 0x4b, 0x26, 0xb1, 0xa7, 0xe4, 0x68, 0x60, 0xa1, 0xff, 0x93, 0xcb, 0x28, - 0x90, 0x6c, 0xc0, 0xd8, 0x69, 0xfa, 0xff, 0x56, 0xd2, 0xa7, 0xb1, 0x07, 0xa7, 0xb1, 0xe1, 0xf7, - 0x1a, 0x80, 0x78, 0x00, 0xb1, 0xdb, 0x6f, 0xd0, 0xb0, 0x8b, 0xaa, 0xaf, 0x35, 0x12, 0xc6, 0xc6, - 0x10, 0x4e, 0xb8, 0x8d, 0x57, 0x54, 0x7c, 0x38, 0x6c, 0x80, 0x32, 0x82, 0x43, 0x1b, 0x14, 0x42, - 0x69, 0x35, 0x08, 0x68, 0xa0, 0xc6, 0x53, 0x3f, 0x33, 0x17, 0x69, 0x69, 0x96, 0xe4, 0xd3, 0x30, - 0x72, 0x3d, 0xed, 0x96, 0x0b, 0x31, 0x3d, 0x8a, 0xc3, 0x8a, 0x28, 0x36, 0x89, 0xa2, 0xe4, 0x5f, - 0x2f, 0xca, 0x16, 0x19, 0x1d, 0x25, 0x06, 0xbb, 0x52, 0x05, 0xff, 0x1f, 0x71, 0x2d, 0xaf, 0xb5, - 0xb3, 0xbe, 0xd1, 0x40, 0x3c, 0x06, 0xdc, 0x01, 0x79, 0xee, 0xa8, 0xa9, 0x4b, 0x3e, 0x9f, 0xcf, - 0x20, 0x92, 0x43, 0xc7, 0x25, 0x11, 0x15, 0x8a, 0x2f, 0x24, 0x51, 0xe0, 0xdb, 0x60, 0xca, 0x25, - 0x8c, 0xe1, 0xba, 0x8a, 0x1c, 0x3d, 0x26, 0x77, 0x43, 0x31, 0xea, 0xeb, 0xf5, 0xdb, 0x60, 0x29, - 0xe3, 0x51, 0x0e, 0xcb, 0x60, 0xc2, 0x92, 0xff, 0x71, 0x45, 0x42, 0x13, 0xe6, 0x8c, 0x60, 0x94, - 0x4d, 0xf9, 0xd7, 0x36, 0x94, 0x9b, 0x6b, 0xcf, 0x5e, 0x96, 0xc6, 0x9e, 0xbf, 0x2c, 0x8d, 0xbd, - 0x78, 0x59, 0x1a, 0xfb, 0xaa, 0x57, 0xd2, 0x9e, 0xf5, 0x4a, 0xda, 0xf3, 0x5e, 0x49, 0x7b, 0xd1, - 0x2b, 0x69, 0x7f, 0xf5, 0x4a, 0xda, 0x77, 0x7f, 0x97, 0xc6, 0x3e, 0x1d, 0x6f, 0xaf, 0xff, 0x17, - 0x00, 0x00, 0xff, 0xff, 0x02, 0xb2, 0x6f, 0xe2, 0x3e, 0x12, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/storage/v1/generated.proto", fileDescriptor_662262cc70094b41) +} + +var fileDescriptor_662262cc70094b41 = []byte{ + // 1655 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xbd, 0x6f, 0x1b, 0xc9, + 0x15, 0xd7, 0x8a, 0xd4, 0xd7, 0x50, 0xb2, 0xa4, 0x91, 0xe4, 0x30, 0x2a, 0x48, 0x61, 0xed, 0x24, + 0xb2, 0x13, 0x2f, 0x6d, 0xd9, 0x31, 0x0c, 0x07, 0x2e, 0xb4, 0x12, 0x1d, 0x0b, 0x11, 0x25, 0x65, + 0xa8, 0x18, 0x46, 0x90, 0x04, 0x1e, 0xed, 0x8e, 0xa8, 0xb1, 0xb8, 0x1f, 0xde, 0x19, 0x2a, 0x62, + 0xaa, 0xa4, 0x49, 0x17, 0x20, 0x69, 0xf3, 0x57, 0x24, 0x40, 0xd2, 0x5c, 0x79, 0xc5, 0xc1, 0xd7, + 0x19, 0x57, 0xb9, 0x22, 0xce, 0xbc, 0xfa, 0xae, 0xbc, 0x42, 0xd5, 0x61, 0x66, 0x87, 0xdc, 0x0f, + 0x2e, 0x65, 0xa9, 0x61, 0xc7, 0x99, 0xf7, 0xde, 0xef, 0xbd, 0x99, 0xf7, 0xde, 0x6f, 0xde, 0x12, + 0xfc, 0xe4, 0xf4, 0x09, 0x33, 0xa8, 0x57, 0xc1, 0x3e, 0xad, 0x30, 0xee, 0x05, 0xb8, 0x41, 0x2a, + 0x67, 0x0f, 0x2a, 0x0d, 0xe2, 0x92, 0x00, 0x73, 0x62, 0x1b, 0x7e, 0xe0, 0x71, 0x0f, 0xae, 0x84, + 0x6a, 0x06, 0xf6, 0xa9, 0xa1, 
0xd4, 0x8c, 0xb3, 0x07, 0xab, 0xf7, 0x1a, 0x94, 0x9f, 0xb4, 0x8e, + 0x0c, 0xcb, 0x73, 0x2a, 0x0d, 0xaf, 0xe1, 0x55, 0xa4, 0xf6, 0x51, 0xeb, 0x58, 0xae, 0xe4, 0x42, + 0xfe, 0x0a, 0x51, 0x56, 0xf5, 0x98, 0x33, 0xcb, 0x0b, 0xb2, 0x3c, 0xad, 0x3e, 0x8a, 0x74, 0x1c, + 0x6c, 0x9d, 0x50, 0x97, 0x04, 0xed, 0x8a, 0x7f, 0xda, 0x90, 0x46, 0x01, 0x61, 0x5e, 0x2b, 0xb0, + 0xc8, 0xb5, 0xac, 0x58, 0xc5, 0x21, 0x1c, 0x67, 0xf9, 0xaa, 0x0c, 0xb3, 0x0a, 0x5a, 0x2e, 0xa7, + 0xce, 0xa0, 0x9b, 0xc7, 0x9f, 0x32, 0x60, 0xd6, 0x09, 0x71, 0x70, 0xda, 0x4e, 0xff, 0xbf, 0x06, + 0x66, 0xb6, 0xea, 0x3b, 0xdb, 0x01, 0x3d, 0x23, 0x01, 0x7c, 0x0d, 0xa6, 0x45, 0x44, 0x36, 0xe6, + 0xb8, 0xa8, 0xad, 0x69, 0xeb, 0x85, 0x8d, 0xfb, 0x46, 0x74, 0xbf, 0x7d, 0x60, 0xc3, 0x3f, 0x6d, + 0x88, 0x0d, 0x66, 0x08, 0x6d, 0xe3, 0xec, 0x81, 0xb1, 0x7f, 0xf4, 0x86, 0x58, 0xbc, 0x46, 0x38, + 0x36, 0xe1, 0xbb, 0x4e, 0x79, 0xac, 0xdb, 0x29, 0x83, 0x68, 0x0f, 0xf5, 0x51, 0xe1, 0x73, 0x90, + 0x67, 0x3e, 0xb1, 0x8a, 0xe3, 0x12, 0xfd, 0xb6, 0x91, 0x99, 0x3d, 0xa3, 0x1f, 0x51, 0xdd, 0x27, + 0x96, 0x39, 0xab, 0x10, 0xf3, 0x62, 0x85, 0xa4, 0xbd, 0xfe, 0x3f, 0x0d, 0xcc, 0xf5, 0xb5, 0x76, + 0x29, 0xe3, 0xf0, 0x0f, 0x03, 0xb1, 0x1b, 0x57, 0x8b, 0x5d, 0x58, 0xcb, 0xc8, 0x17, 0x94, 0x9f, + 0xe9, 0xde, 0x4e, 0x2c, 0xee, 0x2a, 0x98, 0xa0, 0x9c, 0x38, 0xac, 0x38, 0xbe, 0x96, 0x5b, 0x2f, + 0x6c, 0xac, 0x7d, 0x2a, 0x70, 0x73, 0x4e, 0x81, 0x4d, 0xec, 0x08, 0x33, 0x14, 0x5a, 0xeb, 0x5f, + 0xe5, 0x63, 0x61, 0x8b, 0xe3, 0xc0, 0xa7, 0xe0, 0x06, 0xe6, 0x1c, 0x5b, 0x27, 0x88, 0xbc, 0x6d, + 0xd1, 0x80, 0xd8, 0x32, 0xf8, 0x69, 0x13, 0x76, 0x3b, 0xe5, 0x1b, 0x9b, 0x09, 0x09, 0x4a, 0x69, + 0x0a, 0x5b, 0xdf, 0xb3, 0x77, 0xdc, 0x63, 0x6f, 0xdf, 0xad, 0x79, 0x2d, 0x97, 0xcb, 0x6b, 0x55, + 0xb6, 0x07, 0x09, 0x09, 0x4a, 0x69, 0x42, 0x0b, 0x2c, 0x9f, 0x79, 0xcd, 0x96, 0x43, 0x76, 0xe9, + 0x31, 0xb1, 0xda, 0x56, 0x93, 0xd4, 0x3c, 0x9b, 0xb0, 0x62, 0x6e, 0x2d, 0xb7, 0x3e, 0x63, 0x56, + 0xba, 0x9d, 0xf2, 0xf2, 0xcb, 0x0c, 0xf9, 0x45, 0xa7, 0xbc, 0x94, 0xb1, 0x8f, 0x32, 0xc1, 0xe0, + 0x33, 0x30, 0xaf, 0x2e, 0x67, 0x0b, 0xfb, 0xd8, 0xa2, 0xbc, 0x5d, 0xcc, 0xcb, 0x08, 0x97, 0xba, + 0x9d, 0xf2, 0x7c, 0x3d, 0x29, 0x42, 0x69, 0x5d, 0xf8, 0x02, 0xcc, 0x1d, 0xb3, 0x5f, 0x07, 0x5e, + 0xcb, 0x3f, 0xf0, 0x9a, 0xd4, 0x6a, 0x17, 0x27, 0xd6, 0xb4, 0xf5, 0x19, 0x53, 0xef, 0x76, 0xca, + 0x73, 0xcf, 0xeb, 0x31, 0xc1, 0x45, 0x7a, 0x03, 0x25, 0x0d, 0xe1, 0x6b, 0x30, 0xc7, 0xbd, 0x53, + 0xe2, 0x8a, 0xab, 0x23, 0x8c, 0xb3, 0xe2, 0xa4, 0x4c, 0xe3, 0xad, 0x21, 0x69, 0x3c, 0x8c, 0xe9, + 0x9a, 0x2b, 0x2a, 0x93, 0x73, 0xf1, 0x5d, 0x86, 0x92, 0x80, 0x70, 0x0b, 0x2c, 0x06, 0x61, 0x5e, + 0x18, 0x22, 0x7e, 0xeb, 0xa8, 0x49, 0xd9, 0x49, 0x71, 0x4a, 0x1e, 0x76, 0xa5, 0xdb, 0x29, 0x2f, + 0xa2, 0xb4, 0x10, 0x0d, 0xea, 0xc3, 0x47, 0x60, 0x96, 0x91, 0x5d, 0xea, 0xb6, 0xce, 0xc3, 0x74, + 0x4e, 0x4b, 0xfb, 0x85, 0x6e, 0xa7, 0x3c, 0x5b, 0xaf, 0x46, 0xfb, 0x28, 0xa1, 0xa5, 0xff, 0x57, + 0x03, 0x53, 0x5b, 0xf5, 0x9d, 0x3d, 0xcf, 0x26, 0x23, 0xe8, 0xe0, 0xed, 0x44, 0x07, 0xeb, 0xc3, + 0x1b, 0x41, 0xc4, 0x33, 0xb4, 0x7f, 0xbf, 0x0b, 0xfb, 0x57, 0xe8, 0x28, 0xee, 0x59, 0x03, 0x79, + 0x17, 0x3b, 0x44, 0x46, 0x3d, 0x13, 0xd9, 0xec, 0x61, 0x87, 0x20, 0x29, 0x81, 0x3f, 0x05, 0x93, + 0xae, 0x67, 0x93, 0x9d, 0x6d, 0xe9, 0x7b, 0xc6, 0xbc, 0xa1, 0x74, 0x26, 0xf7, 0xe4, 0x2e, 0x52, + 0x52, 0x71, 0x8b, 0xdc, 0xf3, 0xbd, 0xa6, 0xd7, 0x68, 0xff, 0x86, 0xb4, 0x7b, 0x25, 0x2d, 0x6f, + 0xf1, 0x30, 0xb6, 0x8f, 0x12, 0x5a, 0xf0, 0x8f, 0xa0, 0x80, 0x9b, 0x4d, 0xcf, 0xc2, 0x1c, 0x1f, + 0x35, 0x89, 0xac, 0xd3, 0xc2, 0xc6, 0xdd, 0x21, 0xc7, 
0x0b, 0x5b, 0x40, 0xf8, 0x45, 0x8a, 0xf8, + 0x99, 0x39, 0xdf, 0xed, 0x94, 0x0b, 0x9b, 0x11, 0x04, 0x8a, 0xe3, 0xe9, 0xff, 0xd1, 0x40, 0x41, + 0x1d, 0x78, 0x04, 0x74, 0xb5, 0x95, 0xa4, 0xab, 0xd2, 0xe5, 0x59, 0x1a, 0x42, 0x56, 0x7f, 0xea, + 0x47, 0x2c, 0x99, 0x6a, 0x1f, 0x4c, 0xd9, 0x32, 0x55, 0xac, 0xa8, 0x49, 0xd4, 0xdb, 0x97, 0xa3, + 0x2a, 0x22, 0x9c, 0x57, 0xd8, 0x53, 0xe1, 0x9a, 0xa1, 0x1e, 0x8a, 0xfe, 0x7d, 0x0e, 0xc0, 0xad, + 0xfa, 0x4e, 0x8a, 0x06, 0x46, 0x50, 0xc2, 0x14, 0xcc, 0x8a, 0x52, 0xe9, 0x15, 0x83, 0x2a, 0xe5, + 0x87, 0x57, 0xbc, 0x7f, 0x7c, 0x44, 0x9a, 0x75, 0xd2, 0x24, 0x16, 0xf7, 0x82, 0xb0, 0xaa, 0xf6, + 0x62, 0x60, 0x28, 0x01, 0x0d, 0xb7, 0xc1, 0x42, 0x8f, 0xd5, 0x9a, 0x98, 0x31, 0x51, 0xcd, 0xc5, + 0x9c, 0xac, 0xde, 0xa2, 0x0a, 0x71, 0xa1, 0x9e, 0x92, 0xa3, 0x01, 0x0b, 0xf8, 0x0a, 0x4c, 0x5b, + 0x71, 0x02, 0xfd, 0x44, 0xb1, 0x18, 0xbd, 0x69, 0xc4, 0xf8, 0x6d, 0x0b, 0xbb, 0x9c, 0xf2, 0xb6, + 0x39, 0x2b, 0x0a, 0xa5, 0xcf, 0xb4, 0x7d, 0x34, 0xc8, 0xc0, 0xa2, 0x83, 0xcf, 0xa9, 0xd3, 0x72, + 0xc2, 0x92, 0xae, 0xd3, 0xbf, 0x10, 0x49, 0xb3, 0xd7, 0x77, 0x21, 0x69, 0xae, 0x96, 0x06, 0x43, + 0x83, 0xf8, 0xfa, 0x17, 0x1a, 0xb8, 0x39, 0x98, 0xf8, 0x11, 0xb4, 0xc5, 0x5e, 0xb2, 0x2d, 0xee, + 0x0c, 0x2f, 0xe0, 0x54, 0x6c, 0x43, 0x3a, 0xe4, 0x1f, 0x93, 0x60, 0x36, 0x9e, 0xbe, 0x11, 0xd4, + 0xee, 0x2f, 0x41, 0xc1, 0x0f, 0xbc, 0x33, 0xca, 0xa8, 0xe7, 0x92, 0x40, 0x31, 0xe1, 0x92, 0x32, + 0x29, 0x1c, 0x44, 0x22, 0x14, 0xd7, 0x83, 0x0d, 0x00, 0x7c, 0x1c, 0x60, 0x87, 0x70, 0xd1, 0xbf, + 0x39, 0x79, 0xfc, 0x87, 0x43, 0x8e, 0x1f, 0x3f, 0x91, 0x71, 0xd0, 0xb7, 0xaa, 0xba, 0x3c, 0x68, + 0x47, 0xd1, 0x45, 0x02, 0x14, 0x83, 0x86, 0xa7, 0x60, 0x2e, 0x20, 0x56, 0x13, 0x53, 0x47, 0xbd, + 0xd9, 0x79, 0x19, 0x61, 0x55, 0x3c, 0xa0, 0x28, 0x2e, 0xb8, 0xe8, 0x94, 0xef, 0x0f, 0x4e, 0xdd, + 0xc6, 0x01, 0x09, 0x18, 0x65, 0x9c, 0xb8, 0x3c, 0x2c, 0x98, 0x84, 0x0d, 0x4a, 0x62, 0x0b, 0xa6, + 0x77, 0xc4, 0x13, 0xb8, 0xef, 0x73, 0xea, 0xb9, 0xac, 0x38, 0x11, 0x31, 0x7d, 0x2d, 0xb6, 0x8f, + 0x12, 0x5a, 0x70, 0x17, 0x2c, 0x0b, 0x66, 0xfe, 0x73, 0xe8, 0xa0, 0x7a, 0xee, 0x63, 0x57, 0xdc, + 0x52, 0x71, 0x52, 0xbe, 0xb6, 0x45, 0x31, 0xfa, 0x6c, 0x66, 0xc8, 0x51, 0xa6, 0x15, 0x7c, 0x05, + 0x16, 0xc3, 0xd9, 0xc7, 0xa4, 0xae, 0x4d, 0xdd, 0x86, 0x98, 0x7c, 0xe4, 0xc3, 0x3f, 0x63, 0xde, + 0x15, 0x1d, 0xf1, 0x32, 0x2d, 0xbc, 0xc8, 0xda, 0x44, 0x83, 0x20, 0xf0, 0x2d, 0x58, 0x94, 0x1e, + 0x89, 0xad, 0xe8, 0x84, 0x12, 0x56, 0x9c, 0x96, 0xa9, 0x5b, 0x8f, 0xa7, 0x4e, 0x5c, 0x5d, 0x38, + 0xb5, 0x84, 0xa4, 0xd3, 0x23, 0xa7, 0x43, 0x12, 0x38, 0xe6, 0x8f, 0x55, 0xbe, 0x16, 0x37, 0xd3, + 0x50, 0x68, 0x10, 0x7d, 0xf5, 0x19, 0x98, 0x4f, 0x25, 0x1c, 0x2e, 0x80, 0xdc, 0x29, 0x69, 0x87, + 0xcf, 0x32, 0x12, 0x3f, 0xe1, 0x32, 0x98, 0x38, 0xc3, 0xcd, 0x16, 0x09, 0x8b, 0x0f, 0x85, 0x8b, + 0xa7, 0xe3, 0x4f, 0x34, 0xfd, 0x33, 0x0d, 0x24, 0xe8, 0x6c, 0x04, 0x2d, 0xfd, 0x22, 0xd9, 0xd2, + 0xb7, 0xae, 0x50, 0xd3, 0x43, 0x9a, 0xf9, 0x6f, 0x1a, 0x98, 0x8d, 0x8f, 0x78, 0xf0, 0x17, 0x60, + 0x1a, 0xb7, 0x6c, 0x4a, 0x5c, 0xab, 0x37, 0x95, 0xf4, 0x03, 0xd9, 0x54, 0xfb, 0xa8, 0xaf, 0x21, + 0x06, 0x40, 0x72, 0xee, 0xd3, 0x00, 0x8b, 0x22, 0xab, 0x13, 0xcb, 0x73, 0x6d, 0x26, 0x6f, 0x28, + 0x17, 0x32, 0x63, 0x35, 0x2d, 0x44, 0x83, 0xfa, 0xfa, 0xbf, 0xc7, 0xc1, 0x42, 0x58, 0x1b, 0xe1, + 0xe8, 0xef, 0x10, 0x97, 0x8f, 0x80, 0x54, 0x6a, 0x89, 0x99, 0xee, 0xe7, 0x97, 0x0e, 0x3d, 0x51, + 0x60, 0xc3, 0x86, 0x3b, 0xf8, 0x3b, 0x30, 0xc9, 0x38, 0xe6, 0x2d, 0x26, 0x9f, 0xba, 0xc2, 0xc6, + 0xbd, 0xab, 0x02, 0x4a, 0xa3, 0x68, 0xae, 0x0b, 0xd7, 0x48, 0x81, 0xe9, 0x9f, 
0x6b, 0x60, 0x39, + 0x6d, 0x32, 0x82, 0x0a, 0xdb, 0x4d, 0x56, 0xd8, 0xcf, 0xae, 0x78, 0x98, 0x61, 0x5f, 0x80, 0x1a, + 0xb8, 0x39, 0x70, 0x6e, 0xf9, 0x92, 0x0a, 0x5e, 0xf2, 0x53, 0xec, 0xb7, 0x17, 0x4d, 0xc4, 0x92, + 0x97, 0x0e, 0x32, 0xe4, 0x28, 0xd3, 0x0a, 0xbe, 0x01, 0x0b, 0xd4, 0x6d, 0x52, 0x97, 0xa8, 0x87, + 0x37, 0xca, 0x6f, 0x26, 0x79, 0xa4, 0x91, 0x65, 0x72, 0x97, 0xc5, 0x7c, 0xb2, 0x93, 0x42, 0x41, + 0x03, 0xb8, 0xfa, 0x97, 0x19, 0x99, 0x91, 0x33, 0xa3, 0x68, 0x21, 0xb9, 0x43, 0x82, 0x81, 0x16, + 0x52, 0xfb, 0xa8, 0xaf, 0x21, 0xeb, 0x46, 0x5e, 0x85, 0x0a, 0xf4, 0xca, 0x75, 0x23, 0x8d, 0x62, + 0x75, 0x23, 0xd7, 0x48, 0x81, 0x89, 0x20, 0xc4, 0x4c, 0x16, 0x9b, 0xbd, 0xfa, 0x41, 0xec, 0xa9, + 0x7d, 0xd4, 0xd7, 0xd0, 0xbf, 0xcd, 0x65, 0x24, 0x48, 0x16, 0x60, 0xec, 0x34, 0xbd, 0xaf, 0xf4, + 0xf4, 0x69, 0xec, 0xfe, 0x69, 0x6c, 0xf8, 0x2f, 0x0d, 0x40, 0xdc, 0x87, 0xa8, 0xf5, 0x0a, 0x34, + 0xac, 0xa2, 0xea, 0xb5, 0x5a, 0xc2, 0xd8, 0x1c, 0xc0, 0x09, 0x5f, 0xe3, 0x55, 0xe5, 0x1f, 0x0e, + 0x2a, 0xa0, 0x0c, 0xe7, 0xd0, 0x06, 0x85, 0x70, 0xb7, 0x1a, 0x04, 0x5e, 0xa0, 0xda, 0x53, 0xbf, + 0x34, 0x16, 0xa9, 0x69, 0x96, 0xe4, 0xc7, 0x4d, 0x64, 0x7a, 0xd1, 0x29, 0x17, 0x62, 0x72, 0x14, + 0x87, 0x15, 0x5e, 0x6c, 0x12, 0x79, 0xc9, 0x5f, 0xcf, 0xcb, 0x36, 0x19, 0xee, 0x25, 0x06, 0xbb, + 0x5a, 0x05, 0x3f, 0x1a, 0x72, 0x2d, 0xd7, 0x7a, 0xb3, 0xfe, 0xae, 0x81, 0xb8, 0x0f, 0xb8, 0x0b, + 0xf2, 0x9c, 0xaa, 0xae, 0x4b, 0x7e, 0x00, 0x5e, 0x42, 0x24, 0x87, 0xd4, 0x21, 0x11, 0x15, 0x8a, + 0x15, 0x92, 0x28, 0xf0, 0x0e, 0x98, 0x72, 0x08, 0x63, 0xb8, 0xa1, 0x3c, 0x47, 0x9f, 0x43, 0xb5, + 0x70, 0x1b, 0xf5, 0xe4, 0xfa, 0x63, 0xb0, 0x94, 0xf1, 0x59, 0x09, 0xcb, 0x60, 0xc2, 0x92, 0x7f, + 0x06, 0x88, 0x80, 0x26, 0xcc, 0x19, 0xc1, 0x28, 0x5b, 0xf2, 0x5f, 0x80, 0x70, 0xdf, 0xfc, 0xd5, + 0xbb, 0x8f, 0xa5, 0xb1, 0xf7, 0x1f, 0x4b, 0x63, 0x1f, 0x3e, 0x96, 0xc6, 0xfe, 0xda, 0x2d, 0x69, + 0xef, 0xba, 0x25, 0xed, 0x7d, 0xb7, 0xa4, 0x7d, 0xe8, 0x96, 0xb4, 0xaf, 0xbb, 0x25, 0xed, 0x9f, + 0xdf, 0x94, 0xc6, 0x7e, 0xbf, 0x92, 0xf9, 0x77, 0xea, 0x0f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7a, + 0x55, 0x95, 0x9f, 0x66, 0x15, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -756,6 +826,16 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SELinuxMount != nil { + i-- + if *m.SELinuxMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } if m.RequiresRepublish != nil { i-- if *m.RequiresRepublish { @@ -1010,6 +1090,127 @@ func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIStorageCapacity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIStorageCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaximumVolumeSize != nil { + { + size, err := m.MaximumVolumeSize.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Capacity != nil { + { + size, err := m.Capacity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + 
i -= len(m.StorageClassName) + copy(dAtA[i:], m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i-- + dAtA[i] = 0x1a + if m.NodeTopology != nil { + { + size, err := m.NodeTopology.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSIStorageCapacityList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIStorageCapacityList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIStorageCapacityList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1604,6 +1805,9 @@ func (m *CSIDriverSpec) Size() (n int) { if m.RequiresRepublish != nil { n += 2 } + if m.SELinuxMount != nil { + n += 2 + } return n } @@ -1675,6 +1879,48 @@ func (m *CSINodeSpec) Size() (n int) { return n } +func (m *CSIStorageCapacity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeTopology != nil { + l = m.NodeTopology.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Capacity != nil { + l = m.Capacity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaximumVolumeSize != nil { + l = m.MaximumVolumeSize.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CSIStorageCapacityList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *StorageClass) Size() (n int) { if m == nil { return 0 @@ -1915,6 +2161,7 @@ func (this *CSIDriverSpec) String() string { `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, `TokenRequests:` + repeatedStringForTokenRequests + `,`, `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, + `SELinuxMount:` + valueToStringGenerated(this.SELinuxMount) + `,`, `}`, }, "") return s @@ -1974,6 +2221,36 @@ func (this *CSINodeSpec) String() string { }, "") return s } +func (this *CSIStorageCapacity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIStorageCapacity{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", 
"v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `NodeTopology:` + strings.Replace(fmt.Sprintf("%v", this.NodeTopology), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `Capacity:` + strings.Replace(fmt.Sprintf("%v", this.Capacity), "Quantity", "resource.Quantity", 1) + `,`, + `MaximumVolumeSize:` + strings.Replace(fmt.Sprintf("%v", this.MaximumVolumeSize), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIStorageCapacityList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSIStorageCapacity{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIStorageCapacity", "CSIStorageCapacity", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSIStorageCapacityList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *StorageClass) String() string { if this == nil { return "nil" @@ -2581,6 +2858,27 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.RequiresRepublish = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SELinuxMount = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3101,6 +3399,346 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *CSIStorageCapacity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIStorageCapacity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIStorageCapacity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeTopology", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeTopology == nil { + m.NodeTopology = &v1.LabelSelector{} + } + if err := m.NodeTopology.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = &resource.Quantity{} + } + if err := m.Capacity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaximumVolumeSize", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaximumVolumeSize == nil { + m.MaximumVolumeSize = &resource.Quantity{} + } + if err := m.MaximumVolumeSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIStorageCapacityList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
CSIStorageCapacityList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIStorageCapacityList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSIStorageCapacity{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StorageClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index de639354d..ec2beac46 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -22,12 +22,13 @@ syntax = "proto2"; package k8s.io.api.storage.v1; import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/api/storage/v1"; // CSIDriver captures information about a Container Storage Interface (CSI) // volume driver deployed on the cluster. @@ -43,9 +44,9 @@ message CSIDriver { // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. 
optional CSIDriverSpec spec = 2; } @@ -54,7 +55,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -78,17 +79,16 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace @@ -103,35 +103,33 @@ message CSIDriverSpec { // deployed on such a cluster and the deployment determines which mode that is, for example // via a command line parameter of the driver. // - // This field is immutable. + // This field was immutable in Kubernetes < 1.29 and now is mutable. // // +optional optional bool podInfoOnMount = 2; // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. 
// // +optional // +listType=set repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -144,27 +142,25 @@ message CSIDriverSpec { // // This field was immutable in Kubernetes <= 1.22 and now is mutable. // - // This is a beta field and only available when the CSIStorageCapacity - // feature is enabled. The default is false. - // // +optional // +featureGate=CSIStorageCapacity optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // - // This field is immutable. + // This field was immutable in Kubernetes < 1.29 and now is mutable. // // Defaults to ReadWriteOnceWithFSType, which will examine each volume // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -184,7 +180,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -194,6 +190,28 @@ message CSIDriverSpec { // // +optional optional bool requiresRepublish = 7; + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. + // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". 
+ // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + optional bool seLinuxMount = 8; } // CSINode holds information about all CSI drivers installed on a node. @@ -206,8 +224,9 @@ message CSIDriverSpec { // enough that it doesn't create this object. // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { + // Standard object's metadata. // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -215,7 +234,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -242,6 +261,7 @@ message CSINodeDriver { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic repeated string topologyKeys = 3; // allocatable represents the volume resources of a node that are available for scheduling. @@ -255,7 +275,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -267,9 +287,103 @@ message CSINodeSpec { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated CSINodeDriver drivers = 1; } +// CSIStorageCapacity stores the result of one CSI GetCapacity call. +// For a given StorageClass, this describes the available capacity in a +// particular topology segment. This can be used when considering where to +// instantiate new PersistentVolumes. +// +// For example this can express things like: +// - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" +// - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" +// +// The following three cases all imply that no capacity is available for +// a certain combination: +// - no object exists with suitable topology and storage class name +// - such an object exists, but the capacity is unset +// - such an object exists, but the capacity is zero +// +// The producer of these objects can decide which approach is more suitable. +// +// They are consumed by the kube-scheduler when a CSI driver opts into +// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler +// compares the MaximumVolumeSize against the requested size of pending volumes +// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back +// to a comparison against the less precise Capacity. If that is also unset, +// the scheduler assumes that capacity is insufficient and tries some other +// node. +message CSIStorageCapacity { + // Standard object's metadata. + // The name has no particular meaning. 
It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. + // + // Objects are namespaced. + // + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // nodeTopology defines which nodes have access to the storage + // for which capacity was reported. If not set, the storage is + // not accessible from any node in the cluster. If empty, the + // storage is accessible from all nodes. This field is + // immutable. + // + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + + // storageClassName represents the name of the StorageClass that the reported capacity applies to. + // It must meet the same requirements as the name of a StorageClass + // object (non-empty, DNS subdomain). If that object no longer exists, + // the CSIStorageCapacity object is obsolete and should be removed by its + // creator. + // This field is immutable. + optional string storageClassName = 3; + + // capacity is the value reported by the CSI driver in its GetCapacityResponse + // for a GetCapacityRequest with topology and parameters that match the + // previous fields. + // + // The semantic is currently (CSI spec 1.2) defined as: + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If not set, that information is currently + // unavailable. + // + // +optional + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // for a GetCapacityRequest with topology and parameters that match the + // previous fields. + // + // This is defined since CSI spec 1.4.0 as the largest size + // that may be used in a + // CreateVolumeRequest.capacity_range.required_bytes field to + // create a volume with the same parameters as those in + // GetCapacityRequest. The corresponding value in the Kubernetes + // API is ResourceRequirements.Requests in a volume claim. + // + // +optional + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; +} + +// CSIStorageCapacityList is a collection of CSIStorageCapacity objects. +message CSIStorageCapacityList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSIStorageCapacity objects. + repeated CSIStorageCapacity items = 2; +} + // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. // @@ -279,44 +393,45 @@ message StorageClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. 
optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -324,19 +439,19 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -351,13 +466,13 @@ message VolumeAttachment { // Standard object metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -369,9 +484,9 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -380,7 +495,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -391,44 +506,44 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. 
// This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -437,11 +552,11 @@ message VolumeAttachmentStatus { // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -450,7 +565,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index 1a2f83d1b..094fa2821 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -55,6 +55,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CSIDriver{}, &CSIDriverList{}, + + &CSIStorageCapacity{}, + &CSIStorageCapacityList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 6da0657ec..de2bbc2e0 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -18,12 +18,14 @@ package v1 import ( v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. @@ -32,41 +34,43 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. 
// +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand. // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -76,16 +80,18 @@ type StorageClass struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClassList is a collection of storage classes. type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -108,6 +114,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. @@ -121,11 +128,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. 
Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -133,29 +140,31 @@ type VolumeAttachment struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -164,7 +173,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -180,26 +189,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. 
// +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -208,11 +217,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -222,6 +231,7 @@ type VolumeError struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriver captures information about a Container Storage Interface (CSI) // volume driver deployed on the cluster. @@ -241,11 +251,12 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriverList is a collection of CSIDriver objects. type CSIDriverList struct { @@ -278,17 +289,16 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace @@ -303,35 +313,33 @@ type CSIDriverSpec struct { // deployed on such a cluster and the deployment determines which mode that is, for example // via a command line parameter of the driver. // - // This field is immutable. 
+ // This field was immutable in Kubernetes < 1.29 and now is mutable. // // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. + // A driver can support one or more of these modes and more modes may be added in the future. // + // This field is beta. // This field is immutable. // // +optional // +listType=set VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -344,27 +352,25 @@ type CSIDriverSpec struct { // // This field was immutable in Kubernetes <= 1.22 and now is mutable. // - // This is a beta field and only available when the CSIStorageCapacity - // feature is enabled. The default is false. - // // +optional // +featureGate=CSIStorageCapacity StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // - // This field is immutable. + // This field was immutable in Kubernetes < 1.29 and now is mutable. // // Defaults to ReadWriteOnceWithFSType, which will examine each volume // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. 
+ // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -384,7 +390,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -394,6 +400,28 @@ type CSIDriverSpec struct { // // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. + // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". + // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -434,12 +462,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec". // // +optional @@ -470,6 +497,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. As long as @@ -483,6 +511,7 @@ const ( type CSINode struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // metadata.name must be the Kubernetes node name. 
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -496,12 +525,14 @@ type CSINodeSpec struct { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` } // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -528,6 +559,7 @@ type CSINodeDriver struct { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` // allocatable represents the volume resources of a node that are available for scheduling. @@ -538,7 +570,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is not specified, then the supported number of volumes on this node is unbounded. @@ -547,6 +579,7 @@ type VolumeNodeResources struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINodeList is a collection of CSINode objects. type CSINodeList struct { @@ -560,3 +593,106 @@ type CSINodeList struct { // items is the list of CSINode Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 + +// CSIStorageCapacity stores the result of one CSI GetCapacity call. +// For a given StorageClass, this describes the available capacity in a +// particular topology segment. This can be used when considering where to +// instantiate new PersistentVolumes. +// +// For example this can express things like: +// - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" +// - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123" +// +// The following three cases all imply that no capacity is available for +// a certain combination: +// - no object exists with suitable topology and storage class name +// - such an object exists, but the capacity is unset +// - such an object exists, but the capacity is zero +// +// The producer of these objects can decide which approach is more suitable. +// +// They are consumed by the kube-scheduler when a CSI driver opts into +// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. 
The scheduler +// compares the MaximumVolumeSize against the requested size of pending volumes +// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back +// to a comparison against the less precise Capacity. If that is also unset, +// the scheduler assumes that capacity is insufficient and tries some other +// node. +type CSIStorageCapacity struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). + // To ensure that there are no conflicts with other CSI drivers on the cluster, + // the recommendation is to use csisc-, a generated name, or a reverse-domain name + // which ends with the unique CSI driver name. + // + // Objects are namespaced. + // + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // nodeTopology defines which nodes have access to the storage + // for which capacity was reported. If not set, the storage is + // not accessible from any node in the cluster. If empty, the + // storage is accessible from all nodes. This field is + // immutable. + // + // +optional + NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` + + // storageClassName represents the name of the StorageClass that the reported capacity applies to. + // It must meet the same requirements as the name of a StorageClass + // object (non-empty, DNS subdomain). If that object no longer exists, + // the CSIStorageCapacity object is obsolete and should be removed by its + // creator. + // This field is immutable. + StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` + + // capacity is the value reported by the CSI driver in its GetCapacityResponse + // for a GetCapacityRequest with topology and parameters that match the + // previous fields. + // + // The semantic is currently (CSI spec 1.2) defined as: + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If not set, that information is currently + // unavailable. + // + // +optional + Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` + + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // for a GetCapacityRequest with topology and parameters that match the + // previous fields. + // + // This is defined since CSI spec 1.4.0 as the largest size + // that may be used in a + // CreateVolumeRequest.capacity_range.required_bytes field to + // create a volume with the same parameters as those in + // GetCapacityRequest. The corresponding value in the Kubernetes + // API is ResourceRequirements.Requests in a volume claim. + // + // +optional + MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty" protobuf:"bytes,5,opt,name=maximumVolumeSize"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 + +// CSIStorageCapacityList is a collection of CSIStorageCapacity objects. 
+type CSIStorageCapacityList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSIStorageCapacity objects. + Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index ed5b18cb7..89b1cbb20 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,12 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. 
The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. 
In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -64,7 +65,7 @@ func (CSIDriverSpec) SwaggerDoc() map[string]string { var map_CSINode = map[string]string{ "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. 
If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", + "metadata": "Standard object's metadata. metadata.name must be the Kubernetes node name.", "spec": "spec is the specification of CSINode", } @@ -74,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", @@ -103,16 +104,39 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { return map_CSINodeSpec } +var map_CSIStorageCapacity = map[string]string{ + "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. 
The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", + "metadata": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", +} + +func (CSIStorageCapacity) SwaggerDoc() map[string]string { + return map_CSIStorageCapacity +} + +var map_CSIStorageCapacityList = map[string]string{ + "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSIStorageCapacity objects.", +} + +func (CSIStorageCapacityList) SwaggerDoc() map[string]string { + return map_CSIStorageCapacityList +} + var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand.", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -122,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -131,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". 
It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -142,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -153,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -162,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -171,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -182,10 +206,10 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { @@ -194,8 +218,8 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -204,7 +228,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 300f42cbb..74ae83bca 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1 import ( corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -126,6 +127,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(bool) **out = **in } + if in.SELinuxMount != nil { + in, out := &in.SELinuxMount, &out.SELinuxMount + *out = new(bool) + **out = **in + } return } @@ -248,6 +254,80 @@ func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIStorageCapacity) DeepCopyInto(out *CSIStorageCapacity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.NodeTopology != nil { + in, out := &in.NodeTopology, &out.NodeTopology + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + x := (*in).DeepCopy() + *out = &x + } + if in.MaximumVolumeSize != nil { + in, out := &in.MaximumVolumeSize, &out.MaximumVolumeSize + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIStorageCapacity. +func (in *CSIStorageCapacity) DeepCopy() *CSIStorageCapacity { + if in == nil { + return nil + } + out := new(CSIStorageCapacity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIStorageCapacity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIStorageCapacityList) DeepCopyInto(out *CSIStorageCapacityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSIStorageCapacity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIStorageCapacityList. +func (in *CSIStorageCapacityList) DeepCopy() *CSIStorageCapacityList { + if in == nil { + return nil + } + out := new(CSIStorageCapacityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CSIStorageCapacityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..a44c1181a --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriver) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriverList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINode) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIStorageCapacity) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *CSIStorageCapacityList) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClass) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttachment) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttachmentList) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 0c82ddad3..86343b170 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// source: k8s.io/api/storage/v1alpha1/generated.proto package v1alpha1 @@ -50,7 +50,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} } func (*CSIStorageCapacity) ProtoMessage() {} func (*CSIStorageCapacity) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{0} + return fileDescriptor_02e7952e43280c27, []int{0} } func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -78,7 +78,7 @@ var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} } func (*CSIStorageCapacityList) ProtoMessage() {} func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{1} + return fileDescriptor_02e7952e43280c27, []int{1} } func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +106,7 @@ var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{2} + return fileDescriptor_02e7952e43280c27, []int{2} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +134,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{3} + return fileDescriptor_02e7952e43280c27, []int{3} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +162,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{4} + return fileDescriptor_02e7952e43280c27, []int{4} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -190,7 +190,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{5} + return fileDescriptor_02e7952e43280c27, []int{5} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -218,7 +218,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{6} + return fileDescriptor_02e7952e43280c27, []int{6} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,10 +243,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +func (m *VolumeAttributesClass) Reset() { *m = 
VolumeAttributesClass{} } +func (*VolumeAttributesClass) ProtoMessage() {} +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { + return fileDescriptor_02e7952e43280c27, []int{7} +} +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClass.Merge(m, src) +} +func (m *VolumeAttributesClass) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClass) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo + +func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } +func (*VolumeAttributesClassList) ProtoMessage() {} +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_02e7952e43280c27, []int{8} +} +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) +} +func (m *VolumeAttributesClassList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo + func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{7} + return fileDescriptor_02e7952e43280c27, []int{9} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -280,73 +336,82 @@ func init() { proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass.ParametersEntry") + proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) -} - -var fileDescriptor_10f856db1e670dc4 = []byte{ - // 923 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, - 0x14, 0x8f, 0x9b, 0x74, 0x37, 0x3b, 0x29, 0x90, 0x1d, 0x85, 0x25, 0x0a, 0x92, 0xb3, 0xca, 0x29, - 0x20, 0x76, 0x4c, 0x17, 0x84, 0x56, 0xdc, 0xea, 0xb6, 0x87, 0x8a, 0xb6, 0xc0, 0xa4, 0x42, 0x08, - 0x38, 0x30, 0x71, 0x1e, 0xce, 0x34, 0xf1, 0x1f, 0x79, 
0xc6, 0x15, 0xe1, 0xc4, 0x89, 0x33, 0x37, - 0xbe, 0x01, 0x9f, 0xa5, 0x07, 0x24, 0x56, 0x9c, 0xf6, 0x14, 0x51, 0xf3, 0x1d, 0x38, 0x70, 0x01, - 0x79, 0x3c, 0x71, 0xdc, 0x38, 0x2d, 0xd9, 0x1e, 0xf6, 0xe6, 0xf7, 0xe6, 0xbd, 0xdf, 0xef, 0xfd, - 0x4f, 0xd0, 0xc1, 0xe4, 0x99, 0x20, 0x3c, 0xb0, 0x26, 0xf1, 0x10, 0x22, 0x1f, 0x24, 0x08, 0xeb, - 0x02, 0xfc, 0x51, 0x10, 0x59, 0xfa, 0x81, 0x85, 0xdc, 0x12, 0x32, 0x88, 0x98, 0x0b, 0xd6, 0xc5, - 0x2e, 0x9b, 0x86, 0x63, 0xb6, 0x6b, 0xb9, 0xe0, 0x43, 0xc4, 0x24, 0x8c, 0x48, 0x18, 0x05, 0x32, - 0xc0, 0x6f, 0x67, 0xc6, 0x84, 0x85, 0x9c, 0x68, 0x63, 0xb2, 0x30, 0xee, 0x3c, 0x71, 0xb9, 0x1c, - 0xc7, 0x43, 0xe2, 0x04, 0x9e, 0xe5, 0x06, 0x6e, 0x60, 0x29, 0x9f, 0x61, 0xfc, 0x9d, 0x92, 0x94, - 0xa0, 0xbe, 0x32, 0xac, 0x4e, 0xaf, 0x40, 0xec, 0x04, 0x51, 0xca, 0xba, 0xca, 0xd7, 0xf9, 0x70, - 0x69, 0xe3, 0x31, 0x67, 0xcc, 0x7d, 0x88, 0x66, 0x56, 0x38, 0x71, 0x95, 0x53, 0x04, 0x22, 0x88, - 0x23, 0x07, 0x5e, 0xca, 0x4b, 0x58, 0x1e, 0x48, 0xb6, 0x8e, 0xcb, 0xba, 0xc9, 0x2b, 0x8a, 0x7d, - 0xc9, 0xbd, 0x32, 0xcd, 0x47, 0xff, 0xe7, 0x20, 0x9c, 0x31, 0x78, 0x6c, 0xd5, 0xaf, 0xf7, 0x77, - 0x15, 0xe1, 0xfd, 0xc1, 0xd1, 0x20, 0xab, 0xdf, 0x3e, 0x0b, 0x99, 0xc3, 0xe5, 0x0c, 0x7f, 0x8b, - 0xea, 0x69, 0x68, 0x23, 0x26, 0x59, 0xdb, 0x78, 0x6c, 0xf4, 0x1b, 0x4f, 0xdf, 0x27, 0xcb, 0x72, - 0xe7, 0x0c, 0x24, 0x9c, 0xb8, 0xa9, 0x42, 0x90, 0xd4, 0x9a, 0x5c, 0xec, 0x92, 0x4f, 0x87, 0xe7, - 0xe0, 0xc8, 0x13, 0x90, 0xcc, 0xc6, 0x97, 0xf3, 0x6e, 0x25, 0x99, 0x77, 0xd1, 0x52, 0x47, 0x73, - 0x54, 0xcc, 0xd1, 0x8e, 0x1f, 0x8c, 0xe0, 0x2c, 0x08, 0x83, 0x69, 0xe0, 0xce, 0xda, 0x5b, 0x8a, - 0xe5, 0x83, 0xcd, 0x58, 0x8e, 0xd9, 0x10, 0xa6, 0x03, 0x98, 0x82, 0x23, 0x83, 0xc8, 0x6e, 0x26, - 0xf3, 0xee, 0xce, 0x69, 0x01, 0x8c, 0x5e, 0x83, 0xc6, 0x07, 0xa8, 0xa9, 0xe7, 0x63, 0x7f, 0xca, - 0x84, 0x38, 0x65, 0x1e, 0xb4, 0xab, 0x8f, 0x8d, 0xfe, 0x03, 0xbb, 0xad, 0x43, 0x6c, 0x0e, 0x56, - 0xde, 0x69, 0xc9, 0x03, 0x7f, 0x89, 0xea, 0x8e, 0x2e, 0x4f, 0xbb, 0xa6, 0x82, 0x25, 0xb7, 0x05, - 0x4b, 0x16, 0x13, 0x41, 0x3e, 0x8f, 0x99, 0x2f, 0xb9, 0x9c, 0xd9, 0x3b, 0xc9, 0xbc, 0x5b, 0x5f, - 0x94, 0x98, 0xe6, 0x68, 0x58, 0xa0, 0x87, 0x1e, 0xfb, 0x9e, 0x7b, 0xb1, 0xf7, 0x45, 0x30, 0x8d, - 0x3d, 0x18, 0xf0, 0x1f, 0xa0, 0xbd, 0x7d, 0x27, 0x8a, 0x37, 0x93, 0x79, 0xf7, 0xe1, 0xc9, 0x2a, - 0x18, 0x2d, 0xe3, 0xf7, 0x7e, 0x33, 0xd0, 0xa3, 0x72, 0xe3, 0x8f, 0xb9, 0x90, 0xf8, 0x9b, 0x52, - 0xf3, 0xc9, 0x86, 0x6d, 0xe1, 0x22, 0x6b, 0x7d, 0x53, 0xd7, 0xb5, 0xbe, 0xd0, 0x14, 0x1a, 0x7f, - 0x86, 0xb6, 0xb9, 0x04, 0x4f, 0xb4, 0xb7, 0x1e, 0x57, 0xfb, 0x8d, 0xa7, 0x16, 0xb9, 0x65, 0x8d, - 0x49, 0x39, 0x42, 0xfb, 0x35, 0x8d, 0xbd, 0x7d, 0x94, 0xa2, 0xd0, 0x0c, 0xac, 0xf7, 0xeb, 0x16, - 0x6a, 0x66, 0xd9, 0xed, 0x49, 0xc9, 0x9c, 0xb1, 0x07, 0xbe, 0x7c, 0x05, 0x53, 0x3c, 0x40, 0x35, - 0x11, 0x82, 0xa3, 0xa7, 0x77, 0xf7, 0xd6, 0x5c, 0x56, 0xc3, 0x1b, 0x84, 0xe0, 0xd8, 0x3b, 0x1a, - 0xbe, 0x96, 0x4a, 0x54, 0x81, 0xe1, 0xaf, 0xd1, 0x3d, 0x21, 0x99, 0x8c, 0x85, 0x9a, 0xd2, 0xeb, - 0x4b, 0xb1, 0x01, 0xac, 0x72, 0xb5, 0x5f, 0xd7, 0xc0, 0xf7, 0x32, 0x99, 0x6a, 0xc8, 0xde, 0xa5, - 0x81, 0x5a, 0xab, 0x2e, 0xaf, 0xa0, 0xeb, 0xf4, 0x7a, 0xd7, 0x9f, 0xbc, 0x54, 0x4a, 0x37, 0xf4, - 0xfc, 0x0f, 0x03, 0x3d, 0x2a, 0x65, 0xaf, 0x16, 0x02, 0x1f, 0xa3, 0x56, 0x08, 0x91, 0xe0, 0x42, - 0x82, 0x2f, 0x33, 0x1b, 0xb5, 0xf6, 0x46, 0xb6, 0xf6, 0xc9, 0xbc, 0xdb, 0xfa, 0x6c, 0xcd, 0x3b, - 0x5d, 0xeb, 0x85, 0xcf, 0x51, 0x93, 0xfb, 0x53, 0xee, 0x83, 0xde, 0x9f, 0x65, 0xc7, 0xfb, 0xc5, - 0x3c, 0xd2, 0x1f, 0x8e, 0xb4, 0x20, 0xab, 0xc8, 0xaa, 0xd1, 0xad, 0xf4, 0xcc, 
0x1c, 0xad, 0xa0, - 0xd0, 0x12, 0x6e, 0xef, 0xf7, 0x35, 0xfd, 0x49, 0x1f, 0xf0, 0x7b, 0xa8, 0xce, 0x94, 0x06, 0x22, - 0x9d, 0x46, 0x5e, 0xef, 0x3d, 0xad, 0xa7, 0xb9, 0x85, 0x9a, 0x21, 0x55, 0x8a, 0x35, 0x87, 0x75, - 0x83, 0x19, 0x52, 0xae, 0x85, 0x19, 0x52, 0x32, 0xd5, 0x90, 0x69, 0x28, 0xe9, 0x81, 0x2d, 0x1c, - 0xd2, 0x3c, 0x94, 0x53, 0xad, 0xa7, 0xb9, 0x45, 0xef, 0xdf, 0xea, 0x9a, 0x36, 0xa9, 0x61, 0x2c, - 0xe4, 0x34, 0x52, 0x39, 0xd5, 0x4b, 0x39, 0x8d, 0xf2, 0x9c, 0x46, 0xf8, 0x17, 0x03, 0x61, 0x96, - 0x43, 0x9c, 0x2c, 0x86, 0x35, 0x9b, 0xa8, 0x4f, 0xee, 0xb0, 0x24, 0x64, 0xaf, 0x84, 0x76, 0xe8, - 0xcb, 0x68, 0x66, 0x77, 0x74, 0x14, 0xb8, 0x6c, 0x40, 0xd7, 0x84, 0x80, 0xcf, 0x51, 0x23, 0xd3, - 0x1e, 0x46, 0x51, 0x10, 0xe9, 0xb5, 0xed, 0x6f, 0x10, 0x91, 0xb2, 0xb7, 0xcd, 0x64, 0xde, 0x6d, - 0xec, 0x2d, 0x01, 0xfe, 0x99, 0x77, 0x1b, 0x85, 0x77, 0x5a, 0x04, 0x4f, 0xb9, 0x46, 0xb0, 0xe4, - 0xaa, 0xdd, 0x85, 0xeb, 0x00, 0x6e, 0xe6, 0x2a, 0x80, 0x77, 0x0e, 0xd1, 0x5b, 0x37, 0x94, 0x08, - 0x37, 0x51, 0x75, 0x02, 0xb3, 0x6c, 0x12, 0x69, 0xfa, 0x89, 0x5b, 0x68, 0xfb, 0x82, 0x4d, 0xe3, - 0x6c, 0xe2, 0x1e, 0xd0, 0x4c, 0xf8, 0x78, 0xeb, 0x99, 0xd1, 0xfb, 0xc9, 0x40, 0x45, 0x0e, 0x7c, - 0x8c, 0x6a, 0xe9, 0x7f, 0x12, 0x7d, 0x66, 0xde, 0xdd, 0xec, 0xcc, 0x9c, 0x71, 0x0f, 0x96, 0xe7, - 0x32, 0x95, 0xa8, 0x42, 0xc1, 0xef, 0xa0, 0xfb, 0x1e, 0x08, 0xc1, 0x5c, 0xcd, 0x6c, 0xbf, 0xa1, - 0x8d, 0xee, 0x9f, 0x64, 0x6a, 0xba, 0x78, 0xb7, 0xc9, 0xe5, 0x95, 0x59, 0x79, 0x7e, 0x65, 0x56, - 0x5e, 0x5c, 0x99, 0x95, 0x1f, 0x13, 0xd3, 0xb8, 0x4c, 0x4c, 0xe3, 0x79, 0x62, 0x1a, 0x2f, 0x12, - 0xd3, 0xf8, 0x33, 0x31, 0x8d, 0x9f, 0xff, 0x32, 0x2b, 0x5f, 0xd5, 0x17, 0x85, 0xfb, 0x2f, 0x00, - 0x00, 0xff, 0xff, 0x95, 0x04, 0x69, 0x56, 0xa9, 0x0a, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_02e7952e43280c27) +} + +var fileDescriptor_02e7952e43280c27 = []byte{ + // 1009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x3d, 0x6f, 0x23, 0x45, + 0x18, 0xce, 0xda, 0xce, 0x9d, 0x6f, 0x1c, 0x38, 0xdf, 0xc8, 0x77, 0x18, 0x9f, 0xb4, 0x3e, 0xb9, + 0x32, 0x1f, 0x37, 0x4b, 0x02, 0x42, 0x27, 0x24, 0x0a, 0x6f, 0x92, 0x22, 0x22, 0x09, 0xc7, 0x38, + 0x02, 0x04, 0x14, 0x8c, 0xd7, 0x83, 0x3d, 0x89, 0xf7, 0x43, 0x33, 0xb3, 0x16, 0xa6, 0xa2, 0xa2, + 0xa6, 0xe3, 0x1f, 0xf0, 0x5b, 0x52, 0x20, 0x71, 0xba, 0xea, 0x2a, 0x8b, 0x2c, 0xfc, 0x06, 0x0a, + 0x1a, 0xd0, 0xce, 0x8e, 0xd7, 0x1b, 0xaf, 0x1d, 0x9c, 0x14, 0xe9, 0x3c, 0x33, 0xef, 0xfb, 0x3c, + 0xef, 0xc7, 0xf3, 0xbe, 0x9b, 0x80, 0x77, 0xce, 0x9e, 0x09, 0xc4, 0x7c, 0x8b, 0x04, 0xcc, 0x12, + 0xd2, 0xe7, 0x64, 0x40, 0xad, 0xf1, 0x36, 0x19, 0x05, 0x43, 0xb2, 0x6d, 0x0d, 0xa8, 0x47, 0x39, + 0x91, 0xb4, 0x8f, 0x02, 0xee, 0x4b, 0x1f, 0x3e, 0x4e, 0x8c, 0x11, 0x09, 0x18, 0xd2, 0xc6, 0x68, + 0x66, 0xdc, 0x78, 0x3a, 0x60, 0x72, 0x18, 0xf6, 0x90, 0xe3, 0xbb, 0xd6, 0xc0, 0x1f, 0xf8, 0x96, + 0xf2, 0xe9, 0x85, 0xdf, 0xa9, 0x93, 0x3a, 0xa8, 0x5f, 0x09, 0x56, 0xa3, 0x95, 0x21, 0x76, 0x7c, + 0x1e, 0xb3, 0x2e, 0xf2, 0x35, 0x3e, 0x98, 0xdb, 0xb8, 0xc4, 0x19, 0x32, 0x8f, 0xf2, 0x89, 0x15, + 0x9c, 0x0d, 0x94, 0x13, 0xa7, 0xc2, 0x0f, 0xb9, 0x43, 0xaf, 0xe5, 0x25, 0x2c, 0x97, 0x4a, 0xb2, + 0x8c, 0xcb, 0x5a, 0xe5, 0xc5, 0x43, 0x4f, 0x32, 0x37, 0x4f, 0xf3, 0xe1, 0xff, 0x39, 0x08, 0x67, + 0x48, 0x5d, 0xb2, 0xe8, 0xd7, 0xfa, 0xbb, 0x08, 0xe0, 0x6e, 0xf7, 0xa0, 0x9b, 0xd4, 0x6f, 0x97, + 0x04, 0xc4, 0x61, 0x72, 0x02, 0xbf, 0x05, 0xe5, 0x38, 0xb4, 0x3e, 0x91, 0xa4, 0x6e, 0x3c, 0x31, + 0xda, 0x95, 0x9d, 0xf7, 0xd0, 
0xbc, 0xdc, 0x29, 0x03, 0x0a, 0xce, 0x06, 0xf1, 0x85, 0x40, 0xb1, + 0x35, 0x1a, 0x6f, 0xa3, 0x4f, 0x7b, 0xa7, 0xd4, 0x91, 0x47, 0x54, 0x12, 0x1b, 0x9e, 0x4f, 0x9b, + 0x1b, 0xd1, 0xb4, 0x09, 0xe6, 0x77, 0x38, 0x45, 0x85, 0x0c, 0x6c, 0x79, 0x7e, 0x9f, 0x9e, 0xf8, + 0x81, 0x3f, 0xf2, 0x07, 0x93, 0x7a, 0x41, 0xb1, 0xbc, 0xbf, 0x1e, 0xcb, 0x21, 0xe9, 0xd1, 0x51, + 0x97, 0x8e, 0xa8, 0x23, 0x7d, 0x6e, 0x57, 0xa3, 0x69, 0x73, 0xeb, 0x38, 0x03, 0x86, 0x2f, 0x41, + 0xc3, 0x3d, 0x50, 0xd5, 0xfa, 0xd8, 0x1d, 0x11, 0x21, 0x8e, 0x89, 0x4b, 0xeb, 0xc5, 0x27, 0x46, + 0xfb, 0x9e, 0x5d, 0xd7, 0x21, 0x56, 0xbb, 0x0b, 0xef, 0x38, 0xe7, 0x01, 0xbf, 0x04, 0x65, 0x47, + 0x97, 0xa7, 0x5e, 0x52, 0xc1, 0xa2, 0xab, 0x82, 0x45, 0x33, 0x45, 0xa0, 0xcf, 0x42, 0xe2, 0x49, + 0x26, 0x27, 0xf6, 0x56, 0x34, 0x6d, 0x96, 0x67, 0x25, 0xc6, 0x29, 0x1a, 0x14, 0xe0, 0x81, 0x4b, + 0xbe, 0x67, 0x6e, 0xe8, 0x7e, 0xee, 0x8f, 0x42, 0x97, 0x76, 0xd9, 0x0f, 0xb4, 0xbe, 0x79, 0x23, + 0x8a, 0x87, 0xd1, 0xb4, 0xf9, 0xe0, 0x68, 0x11, 0x0c, 0xe7, 0xf1, 0x5b, 0xbf, 0x19, 0xe0, 0x51, + 0xbe, 0xf1, 0x87, 0x4c, 0x48, 0xf8, 0x4d, 0xae, 0xf9, 0x68, 0xcd, 0xb6, 0x30, 0x91, 0xb4, 0xbe, + 0xaa, 0xeb, 0x5a, 0x9e, 0xdd, 0x64, 0x1a, 0x7f, 0x02, 0x36, 0x99, 0xa4, 0xae, 0xa8, 0x17, 0x9e, + 0x14, 0xdb, 0x95, 0x1d, 0x0b, 0x5d, 0x31, 0xc6, 0x28, 0x1f, 0xa1, 0xfd, 0x9a, 0xc6, 0xde, 0x3c, + 0x88, 0x51, 0x70, 0x02, 0xd6, 0xfa, 0xb5, 0x00, 0xaa, 0x49, 0x76, 0x1d, 0x29, 0x89, 0x33, 0x74, + 0xa9, 0x27, 0x6f, 0x41, 0xc5, 0x5d, 0x50, 0x12, 0x01, 0x75, 0xb4, 0x7a, 0xb7, 0xaf, 0xcc, 0x65, + 0x31, 0xbc, 0x6e, 0x40, 0x1d, 0x7b, 0x4b, 0xc3, 0x97, 0xe2, 0x13, 0x56, 0x60, 0xf0, 0x6b, 0x70, + 0x47, 0x48, 0x22, 0x43, 0xa1, 0x54, 0x7a, 0x79, 0x28, 0xd6, 0x80, 0x55, 0xae, 0xf6, 0xeb, 0x1a, + 0xf8, 0x4e, 0x72, 0xc6, 0x1a, 0xb2, 0x75, 0x6e, 0x80, 0xda, 0xa2, 0xcb, 0x2d, 0x74, 0x1d, 0x5f, + 0xee, 0xfa, 0xd3, 0x6b, 0xa5, 0xb4, 0xa2, 0xe7, 0x2f, 0x0d, 0xf0, 0x28, 0x97, 0xbd, 0x1a, 0x08, + 0x78, 0x08, 0x6a, 0x01, 0xe5, 0x82, 0x09, 0x49, 0x3d, 0x99, 0xd8, 0xa8, 0xb1, 0x37, 0x92, 0xb1, + 0x8f, 0xa6, 0xcd, 0xda, 0xf3, 0x25, 0xef, 0x78, 0xa9, 0x17, 0x3c, 0x05, 0x55, 0xe6, 0x8d, 0x98, + 0x47, 0xf5, 0xfc, 0xcc, 0x3b, 0xde, 0xce, 0xe6, 0x11, 0x7f, 0x38, 0xe2, 0x82, 0x2c, 0x22, 0xab, + 0x46, 0xd7, 0xe2, 0x35, 0x73, 0xb0, 0x80, 0x82, 0x73, 0xb8, 0xad, 0xdf, 0x97, 0xf4, 0x27, 0x7e, + 0x80, 0xef, 0x82, 0x32, 0x51, 0x37, 0x94, 0xeb, 0x34, 0xd2, 0x7a, 0x77, 0xf4, 0x3d, 0x4e, 0x2d, + 0x94, 0x86, 0x54, 0x29, 0x96, 0x2c, 0xd6, 0x35, 0x34, 0xa4, 0x5c, 0x33, 0x1a, 0x52, 0x67, 0xac, + 0x21, 0xe3, 0x50, 0xe2, 0x05, 0x9b, 0x59, 0xa4, 0x69, 0x28, 0xc7, 0xfa, 0x1e, 0xa7, 0x16, 0xad, + 0x7f, 0x8b, 0x4b, 0xda, 0xa4, 0xc4, 0x98, 0xc9, 0xa9, 0xaf, 0x72, 0x2a, 0xe7, 0x72, 0xea, 0xa7, + 0x39, 0xf5, 0xe1, 0x2f, 0x06, 0x80, 0x24, 0x85, 0x38, 0x9a, 0x89, 0x35, 0x51, 0xd4, 0x27, 0x37, + 0x18, 0x12, 0xd4, 0xc9, 0xa1, 0xed, 0x7b, 0x92, 0x4f, 0xec, 0x86, 0x8e, 0x02, 0xe6, 0x0d, 0xf0, + 0x92, 0x10, 0xe0, 0x29, 0xa8, 0x24, 0xb7, 0xfb, 0x9c, 0xfb, 0x5c, 0x8f, 0x6d, 0x7b, 0x8d, 0x88, + 0x94, 0xbd, 0x6d, 0x46, 0xd3, 0x66, 0xa5, 0x33, 0x07, 0xf8, 0x67, 0xda, 0xac, 0x64, 0xde, 0x71, + 0x16, 0x3c, 0xe6, 0xea, 0xd3, 0x39, 0x57, 0xe9, 0x26, 0x5c, 0x7b, 0x74, 0x35, 0x57, 0x06, 0xbc, + 0xb1, 0x0f, 0xde, 0x58, 0x51, 0x22, 0x58, 0x05, 0xc5, 0x33, 0x3a, 0x49, 0x94, 0x88, 0xe3, 0x9f, + 0xb0, 0x06, 0x36, 0xc7, 0x64, 0x14, 0x26, 0x8a, 0xbb, 0x87, 0x93, 0xc3, 0x47, 0x85, 0x67, 0x46, + 0xeb, 0xaf, 0x02, 0x78, 0x98, 0x76, 0x80, 0xb3, 0x5e, 0x28, 0xa9, 0x50, 0x1f, 0xd6, 0x5b, 0xd8, + 0xd0, 0x3b, 0x00, 0xf4, 0x39, 0x1b, 0x53, 0xae, 0xd4, 
0xaa, 0x42, 0x9b, 0x7b, 0xec, 0xa5, 0x2f, + 0x38, 0x63, 0x05, 0xc7, 0x00, 0x04, 0x84, 0x13, 0x97, 0x4a, 0xca, 0xe3, 0x25, 0x1c, 0xeb, 0xcb, + 0x5e, 0x4f, 0x5f, 0xd9, 0xec, 0xd0, 0xf3, 0x14, 0x24, 0x91, 0x55, 0xca, 0x3b, 0x7f, 0xc0, 0x19, + 0xa6, 0xc6, 0xc7, 0xe0, 0xfe, 0x82, 0xcb, 0xb5, 0xca, 0xfc, 0xd2, 0x00, 0x6f, 0x2e, 0x0d, 0xe4, + 0x16, 0xf6, 0xfb, 0x17, 0x97, 0xf7, 0xfb, 0xce, 0xf5, 0xab, 0xb5, 0x62, 0xc9, 0xff, 0x64, 0x80, + 0xac, 0x3e, 0xe1, 0x21, 0x28, 0xc5, 0x7f, 0xcf, 0xea, 0x14, 0xde, 0x5e, 0x2f, 0x85, 0x13, 0xe6, + 0xd2, 0xf9, 0xa7, 0x36, 0x3e, 0x61, 0x85, 0x02, 0xdf, 0x02, 0x77, 0x5d, 0x2a, 0x04, 0x19, 0xcc, + 0xa4, 0x71, 0x5f, 0x1b, 0xdd, 0x3d, 0x4a, 0xae, 0xf1, 0xec, 0xdd, 0xee, 0x9c, 0x5f, 0x98, 0x1b, + 0x2f, 0x2e, 0xcc, 0x8d, 0x57, 0x17, 0xe6, 0xc6, 0x8f, 0x91, 0x69, 0x9c, 0x47, 0xa6, 0xf1, 0x22, + 0x32, 0x8d, 0x57, 0x91, 0x69, 0xfc, 0x11, 0x99, 0xc6, 0xcf, 0x7f, 0x9a, 0x1b, 0x5f, 0x3d, 0xbe, + 0xe2, 0x3f, 0x98, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x19, 0x2c, 0xaa, 0xdf, 0x0c, 0x00, + 0x00, } func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) { @@ -734,6 +799,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -915,6 +1089,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } +func (m *VolumeAttributesClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *VolumeAttributesClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -1038,6 +1250,44 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } +func (this *VolumeAttributesClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&VolumeAttributesClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttributesClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttributesClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttributesClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -2198,6 +2448,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttributesClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 78cd16df2..380adbf66 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "k8s.io/api/storage/v1alpha1"; // CSIStorageCapacity stores the result of one CSI GetCapacity call. // For a given StorageClass, this describes the available capacity in a @@ -47,9 +47,13 @@ option go_package = "v1alpha1"; // // The producer of these objects can decide which approach is more suitable. // -// They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate -// is enabled there and a CSI driver opts into capacity-aware scheduling with -// CSIDriver.StorageCapacity. +// They are consumed by the kube-scheduler when a CSI driver opts into +// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler +// compares the MaximumVolumeSize against the requested size of pending volumes +// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back +// to a comparison against the less precise Capacity. If that is also unset, +// the scheduler assumes that capacity is insufficient and tries some other +// node. message CSIStorageCapacity { // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that @@ -61,18 +65,18 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -80,19 +84,19 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. 
// // The semantic is currently (CSI spec 1.2) defined as: // The available capacity, in bytes, of the storage that can be used // to provision volumes. If not set, that information is currently - // unavailable and treated like zero capacity. + // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -104,7 +108,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -112,11 +116,9 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name + // items is the list of CSIStorageCapacity objects. repeated CSIStorageCapacity items = 2; } @@ -128,13 +130,13 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -146,9 +148,9 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -157,7 +159,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -168,57 +170,97 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is alpha-level and is only // honored by servers that enabled the CSIMigration feature. 
// +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional optional VolumeError detachError = 4; } +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +message VolumeAttributesClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Name of the CSI driver + // This field is immutable. + optional string driverName = 2; + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. 
+ // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + map parameters = 3; +} + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +message VolumeAttributesClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of VolumeAttributesClass objects. + repeated VolumeAttributesClass items = 2; +} + // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go index 779c85802..a70f8e186 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/register.go +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -45,6 +45,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachmentList{}, &CSIStorageCapacity{}, &CSIStorageCapacityList{}, + &VolumeAttributesClass{}, + &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index afb0495db..1fbf65f81 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -41,11 +41,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -60,25 +60,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. 
type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -87,7 +88,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -103,26 +104,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -131,11 +132,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. 
type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive // information. // +optional @@ -165,11 +166,16 @@ type VolumeError struct { // // The producer of these objects can decide which approach is more suitable. // -// They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate -// is enabled there and a CSI driver opts into capacity-aware scheduling with -// CSIDriver.StorageCapacity. +// They are consumed by the kube-scheduler when a CSI driver opts into +// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler +// compares the MaximumVolumeSize against the requested size of pending volumes +// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back +// to a comparison against the less precise Capacity. If that is also unset, +// the scheduler assumes that capacity is insufficient and tries some other +// node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -182,7 +188,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -191,7 +197,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -199,19 +205,19 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // // The semantic is currently (CSI spec 1.2) defined as: // The available capacity, in bytes, of the storage that can be used // to provision volumes. If not set, that information is currently - // unavailable and treated like zero capacity. + // unavailable. 
// // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -234,13 +240,64 @@ type CSIStorageCapacity struct { // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name + // items is the list of CSIStorageCapacity objects. Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.29 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +type VolumeAttributesClass struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Name of the CSI driver + // This field is immutable. + DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.29 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +type VolumeAttributesClassList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of VolumeAttributesClass objects. 
+ Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index fa50e0289..ac87dbdca 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -24,16 +24,16 @@ package v1alpha1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIStorageCapacity = map[string]string{ - "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.", + "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). 
To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. 
The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -53,8 +53,8 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -64,7 +64,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -93,20 +93,41 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } +var map_VolumeAttributesClass = map[string]string{ + "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "driverName": "Name of the CSI driver This field is immutable.", + "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. 
If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", +} + +func (VolumeAttributesClass) SwaggerDoc() map[string]string { + return map_VolumeAttributesClass +} + +var map_VolumeAttributesClassList = map[string]string{ + "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of VolumeAttributesClass objects.", +} + +func (VolumeAttributesClassList) SwaggerDoc() map[string]string { + return map_VolumeAttributesClassList +} + var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index d9bc94b25..942871f78 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -238,6 +238,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { + if in == nil { + return nil + } + out := new(VolumeAttributesClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttributesClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. 
+func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { + if in == nil { + return nil + } + out := new(VolumeAttributesClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go index 41114c3c6..c169e782c 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -120,3 +120,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 24 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 32 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { + return 1, 35 +} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index 72b04d273..446a40c48 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto +// source: k8s.io/api/storage/v1beta1/generated.proto package v1beta1 @@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CSIDriver) Reset() { *m = CSIDriver{} } func (*CSIDriver) ProtoMessage() {} func (*CSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{0} + return fileDescriptor_73e4f72503e71065, []int{0} } func (m *CSIDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -79,7 +79,7 @@ var xxx_messageInfo_CSIDriver proto.InternalMessageInfo func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } func (*CSIDriverList) ProtoMessage() {} func (*CSIDriverList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{1} + return fileDescriptor_73e4f72503e71065, []int{1} } func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -107,7 +107,7 @@ var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } func (*CSIDriverSpec) ProtoMessage() {} func (*CSIDriverSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{2} + return fileDescriptor_73e4f72503e71065, []int{2} } func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -135,7 +135,7 @@ var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo func (m *CSINode) Reset() { *m = CSINode{} } func (*CSINode) ProtoMessage() {} func (*CSINode) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{3} + return fileDescriptor_73e4f72503e71065, []int{3} } func (m *CSINode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -163,7 +163,7 @@ var xxx_messageInfo_CSINode proto.InternalMessageInfo func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } func (*CSINodeDriver) ProtoMessage() {} func (*CSINodeDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{4} + return fileDescriptor_73e4f72503e71065, []int{4} } func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +191,7 @@ var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo func (m *CSINodeList) Reset() { *m = CSINodeList{} } func (*CSINodeList) ProtoMessage() {} func (*CSINodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{5} + return fileDescriptor_73e4f72503e71065, []int{5} } func (m *CSINodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -219,7 +219,7 @@ var xxx_messageInfo_CSINodeList proto.InternalMessageInfo func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } func (*CSINodeSpec) ProtoMessage() {} func (*CSINodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{6} + return fileDescriptor_73e4f72503e71065, []int{6} } func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +247,7 @@ var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo func (m *CSIStorageCapacity) Reset() { *m = 
CSIStorageCapacity{} } func (*CSIStorageCapacity) ProtoMessage() {} func (*CSIStorageCapacity) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{7} + return fileDescriptor_73e4f72503e71065, []int{7} } func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +275,7 @@ var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} } func (*CSIStorageCapacityList) ProtoMessage() {} func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{8} + return fileDescriptor_73e4f72503e71065, []int{8} } func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +303,7 @@ var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{9} + return fileDescriptor_73e4f72503e71065, []int{9} } func (m *StorageClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 +331,7 @@ var xxx_messageInfo_StorageClass proto.InternalMessageInfo func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{10} + return fileDescriptor_73e4f72503e71065, []int{10} } func (m *StorageClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -359,7 +359,7 @@ var xxx_messageInfo_StorageClassList proto.InternalMessageInfo func (m *TokenRequest) Reset() { *m = TokenRequest{} } func (*TokenRequest) ProtoMessage() {} func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{11} + return fileDescriptor_73e4f72503e71065, []int{11} } func (m *TokenRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +387,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{12} + return fileDescriptor_73e4f72503e71065, []int{12} } func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +415,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{13} + return fileDescriptor_73e4f72503e71065, []int{13} } func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +443,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{14} + return fileDescriptor_73e4f72503e71065, []int{14} } func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +471,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func 
(*VolumeAttachmentSpec) ProtoMessage() {} func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{15} + return fileDescriptor_73e4f72503e71065, []int{15} } func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +499,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{16} + return fileDescriptor_73e4f72503e71065, []int{16} } func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -524,10 +524,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } +func (*VolumeAttributesClass) ProtoMessage() {} +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{17} +} +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClass.Merge(m, src) +} +func (m *VolumeAttributesClass) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClass) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo + +func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } +func (*VolumeAttributesClassList) ProtoMessage() {} +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{18} +} +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) +} +func (m *VolumeAttributesClassList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo + func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{17} + return fileDescriptor_73e4f72503e71065, []int{19} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +611,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{18} + return fileDescriptor_73e4f72503e71065, []int{20} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -600,120 +656,127 @@ func init() { proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass.ParametersEntry") + proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) -} - -var fileDescriptor_7d2980599fd0de80 = []byte{ - // 1651 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x3b, 0x6f, 0x1b, 0xc7, - 0x16, 0xd6, 0x8a, 0xd4, 0x6b, 0x28, 0x59, 0xd2, 0x48, 0xf6, 0xe5, 0x55, 0x41, 0x0a, 0xbc, 0xb8, - 0xd7, 0xb2, 0x61, 0x2f, 0x6d, 0x5d, 0xc7, 0x30, 0x0c, 0xb8, 0xd0, 0x4a, 0x4a, 0x4c, 0x5b, 0x94, - 0xe5, 0xa1, 0x60, 0x18, 0x46, 0x8a, 0x0c, 0x77, 0x47, 0xd4, 0x58, 0xdc, 0x87, 0x77, 0x86, 0x8a, - 0x99, 0x2a, 0x69, 0x52, 0x07, 0x29, 0xd2, 0x07, 0xc8, 0x5f, 0x48, 0x80, 0xa4, 0x49, 0x19, 0x03, - 0x01, 0x02, 0x23, 0x95, 0x2b, 0x22, 0x66, 0x7e, 0x42, 0x80, 0x14, 0x42, 0x8a, 0x60, 0x66, 0x87, - 0xdc, 0x17, 0x69, 0x49, 0x29, 0xd8, 0x69, 0xcf, 0xe3, 0x3b, 0x67, 0xe6, 0x7c, 0xe7, 0xcc, 0xa1, - 0xc0, 0xe6, 0xd1, 0x1d, 0xa6, 0x53, 0xb7, 0x7c, 0xd4, 0xaa, 0x13, 0xdf, 0x21, 0x9c, 0xb0, 0xf2, - 0x31, 0x71, 0x2c, 0xd7, 0x2f, 0x2b, 0x05, 0xf6, 0x68, 0x99, 0x71, 0xd7, 0xc7, 0x0d, 0x52, 0x3e, - 0xbe, 0x59, 0x27, 0x1c, 0xdf, 0x2c, 0x37, 0x88, 0x43, 0x7c, 0xcc, 0x89, 0xa5, 0x7b, 0xbe, 0xcb, - 0x5d, 0xb8, 0x12, 0xd8, 0xea, 0xd8, 0xa3, 0xba, 0xb2, 0xd5, 0x95, 0xed, 0xca, 0xf5, 0x06, 0xe5, - 0x87, 0xad, 0xba, 0x6e, 0xba, 0x76, 0xb9, 0xe1, 0x36, 0xdc, 0xb2, 0x74, 0xa9, 0xb7, 0x0e, 0xe4, - 0x97, 0xfc, 0x90, 0x7f, 0x05, 0x50, 0x2b, 0xa5, 0x48, 0x58, 0xd3, 0xf5, 0x45, 0xcc, 0x64, 0xb8, - 0x95, 0x5b, 0xa1, 0x8d, 0x8d, 0xcd, 0x43, 0xea, 0x10, 0xbf, 0x5d, 0xf6, 0x8e, 0x1a, 0xd2, 0xc9, - 0x27, 0xcc, 0x6d, 0xf9, 0x26, 0x39, 0x97, 0x17, 0x2b, 0xdb, 0x84, 0xe3, 0x41, 0xb1, 0xca, 0xc3, - 0xbc, 0xfc, 0x96, 0xc3, 0xa9, 0x9d, 0x0e, 0x73, 0xfb, 0x34, 0x07, 0x66, 0x1e, 0x12, 0x1b, 0x27, - 0xfd, 0x4a, 0x3f, 0x68, 0x60, 0x66, 0xb3, 0x56, 0xd9, 0xf2, 0xe9, 0x31, 0xf1, 0xe1, 0x47, 0x60, - 0x5a, 0x64, 0x64, 0x61, 0x8e, 0xf3, 0xda, 0xaa, 0xb6, 0x96, 0x5b, 0xbf, 0xa1, 0x87, 0x97, 0xdc, - 0x07, 0xd6, 0xbd, 0xa3, 0x86, 0x10, 0x30, 0x5d, 0x58, 0xeb, 0xc7, 0x37, 0xf5, 0x47, 0xf5, 0xe7, - 0xc4, 0xe4, 0x55, 0xc2, 0xb1, 0x01, 0x5f, 0x75, 0x8a, 0x63, 0xdd, 0x4e, 0x11, 0x84, 0x32, 0xd4, - 0x47, 0x85, 0x0f, 0x41, 0x96, 0x79, 0xc4, 0xcc, 0x8f, 0x4b, 0xf4, 0x2b, 0xfa, 0xf0, 0x12, 0xea, - 0xfd, 0xb4, 0x6a, 0x1e, 0x31, 0x8d, 0x59, 0x05, 0x9b, 0x15, 0x5f, 0x48, 0x82, 0x94, 0xbe, 0xd7, - 0xc0, 0x5c, 0xdf, 0x6a, 0x87, 0x32, 0x0e, 0x3f, 0x4c, 0x1d, 0x40, 0x3f, 0xdb, 0x01, 0x84, 0xb7, - 0x4c, 0x7f, 0x41, 0xc5, 0x99, 0xee, 0x49, 0x22, 0xc9, 0x3f, 0x00, 0x13, 0x94, 0x13, 0x9b, 0xe5, - 0xc7, 0x57, 0x33, 0x6b, 0xb9, 0xf5, 0xff, 0x9e, 0x29, 0x7b, 
0x63, 0x4e, 0x21, 0x4e, 0x54, 0x84, - 0x2f, 0x0a, 0x20, 0x4a, 0x5f, 0x67, 0x23, 0xb9, 0x8b, 0x33, 0xc1, 0xbb, 0xe0, 0x02, 0xe6, 0x1c, - 0x9b, 0x87, 0x88, 0xbc, 0x68, 0x51, 0x9f, 0x58, 0xf2, 0x04, 0xd3, 0x06, 0xec, 0x76, 0x8a, 0x17, - 0x36, 0x62, 0x1a, 0x94, 0xb0, 0x14, 0xbe, 0x9e, 0x6b, 0x55, 0x9c, 0x03, 0xf7, 0x91, 0x53, 0x75, - 0x5b, 0x0e, 0x97, 0x17, 0xac, 0x7c, 0xf7, 0x62, 0x1a, 0x94, 0xb0, 0x84, 0x26, 0x58, 0x3e, 0x76, - 0x9b, 0x2d, 0x9b, 0xec, 0xd0, 0x03, 0x62, 0xb6, 0xcd, 0x26, 0xa9, 0xba, 0x16, 0x61, 0xf9, 0xcc, - 0x6a, 0x66, 0x6d, 0xc6, 0x28, 0x77, 0x3b, 0xc5, 0xe5, 0x27, 0x03, 0xf4, 0x27, 0x9d, 0xe2, 0xd2, - 0x00, 0x39, 0x1a, 0x08, 0x06, 0xef, 0x81, 0x79, 0x75, 0x43, 0x9b, 0xd8, 0xc3, 0x26, 0xe5, 0xed, - 0x7c, 0x56, 0x66, 0xb8, 0xd4, 0xed, 0x14, 0xe7, 0x6b, 0x71, 0x15, 0x4a, 0xda, 0xc2, 0xfb, 0x60, - 0xee, 0x80, 0x7d, 0xe0, 0xbb, 0x2d, 0x6f, 0xcf, 0x6d, 0x52, 0xb3, 0x9d, 0x9f, 0x58, 0xd5, 0xd6, - 0x66, 0x8c, 0x52, 0xb7, 0x53, 0x9c, 0x7b, 0xbf, 0x16, 0x51, 0x9c, 0x24, 0x05, 0x28, 0xee, 0x08, - 0x09, 0x98, 0xe3, 0xee, 0x11, 0x71, 0xc4, 0xd5, 0x11, 0xc6, 0x59, 0x7e, 0x52, 0xd6, 0x72, 0xed, - 0x5d, 0xb5, 0xdc, 0x8f, 0x38, 0x18, 0x17, 0x55, 0x39, 0xe7, 0xa2, 0x52, 0x86, 0xe2, 0xa8, 0x70, - 0x13, 0x2c, 0xfa, 0x41, 0x71, 0x18, 0x22, 0x5e, 0xab, 0xde, 0xa4, 0xec, 0x30, 0x3f, 0x25, 0x4f, - 0x7c, 0xb1, 0xdb, 0x29, 0x2e, 0xa2, 0xa4, 0x12, 0xa5, 0xed, 0x4b, 0xdf, 0x69, 0x60, 0x6a, 0xb3, - 0x56, 0xd9, 0x75, 0x2d, 0x32, 0x82, 0xd6, 0xac, 0xc4, 0x5a, 0xf3, 0xf2, 0x29, 0xe4, 0x16, 0x49, - 0x0d, 0x6d, 0xcc, 0x3f, 0x82, 0xc6, 0x14, 0x36, 0x6a, 0xb2, 0xac, 0x82, 0xac, 0x83, 0x6d, 0x22, - 0x53, 0x9f, 0x09, 0x7d, 0x76, 0xb1, 0x4d, 0x90, 0xd4, 0xc0, 0xff, 0x81, 0x49, 0xc7, 0xb5, 0x48, - 0x65, 0x4b, 0x26, 0x30, 0x63, 0x5c, 0x50, 0x36, 0x93, 0xbb, 0x52, 0x8a, 0x94, 0x16, 0xde, 0x02, - 0xb3, 0xdc, 0xf5, 0xdc, 0xa6, 0xdb, 0x68, 0x3f, 0x24, 0xed, 0x1e, 0x4d, 0x17, 0xba, 0x9d, 0xe2, - 0xec, 0x7e, 0x44, 0x8e, 0x62, 0x56, 0xb0, 0x0e, 0x72, 0xb8, 0xd9, 0x74, 0x4d, 0xcc, 0x71, 0xbd, - 0x49, 0x24, 0xf7, 0x72, 0xeb, 0xe5, 0x77, 0x9d, 0x31, 0xe0, 0xb6, 0x08, 0x8e, 0xd4, 0x6c, 0x67, - 0xc6, 0x7c, 0xb7, 0x53, 0xcc, 0x6d, 0x84, 0x38, 0x28, 0x0a, 0x5a, 0xfa, 0x56, 0x03, 0x39, 0x75, - 0xea, 0x11, 0x0c, 0xa3, 0xfb, 0xf1, 0x61, 0xf4, 0x9f, 0x33, 0xd4, 0x6b, 0xc8, 0x28, 0x32, 0xfb, - 0x69, 0xcb, 0x39, 0xb4, 0x0f, 0xa6, 0x2c, 0x59, 0x34, 0x96, 0xd7, 0x24, 0xf4, 0x95, 0x33, 0x40, - 0xab, 0x59, 0x37, 0xaf, 0x02, 0x4c, 0x05, 0xdf, 0x0c, 0xf5, 0xa0, 0x4a, 0x7f, 0x66, 0x00, 0xdc, - 0xac, 0x55, 0x12, 0x9d, 0x3e, 0x02, 0x5a, 0x53, 0x30, 0x2b, 0x98, 0xd3, 0xe3, 0x86, 0xa2, 0xf7, - 0xff, 0xcf, 0x58, 0x09, 0x5c, 0x27, 0xcd, 0x1a, 0x69, 0x12, 0x93, 0xbb, 0x7e, 0x40, 0xb2, 0xdd, - 0x08, 0x18, 0x8a, 0x41, 0xc3, 0x2d, 0xb0, 0xd0, 0x1b, 0x5c, 0x4d, 0xcc, 0x98, 0x20, 0x77, 0x3e, - 0x23, 0xc9, 0x9c, 0x57, 0x29, 0x2e, 0xd4, 0x12, 0x7a, 0x94, 0xf2, 0x80, 0x4f, 0xc1, 0xb4, 0x19, - 0x9d, 0x91, 0xa7, 0xd0, 0x46, 0xef, 0xad, 0x1e, 0xfa, 0xe3, 0x16, 0x76, 0x38, 0xe5, 0x6d, 0x63, - 0x56, 0x50, 0xa6, 0x3f, 0x4c, 0xfb, 0x68, 0x90, 0x81, 0x45, 0x1b, 0xbf, 0xa4, 0x76, 0xcb, 0x0e, - 0xc8, 0x5d, 0xa3, 0x9f, 0x10, 0x39, 0x49, 0xcf, 0x1f, 0x42, 0x0e, 0xb1, 0x6a, 0x12, 0x0c, 0xa5, - 0xf1, 0x4b, 0x3f, 0x6b, 0xe0, 0x52, 0xba, 0xf0, 0x23, 0x68, 0x90, 0x5a, 0xbc, 0x41, 0xf4, 0x53, - 0x58, 0x9c, 0x48, 0x70, 0x48, 0xaf, 0x7c, 0x39, 0x09, 0x66, 0xa3, 0x35, 0x1c, 0x01, 0x81, 0xdf, - 0x03, 0x39, 0xcf, 0x77, 0x8f, 0x29, 0xa3, 0xae, 0x43, 0x7c, 0x35, 0x1d, 0x97, 0x94, 0x4b, 0x6e, - 0x2f, 0x54, 0xa1, 0xa8, 0x1d, 0x6c, 0x02, 0xe0, 0x61, 0x1f, 0xdb, 0x84, 0x8b, 0x4e, 
0xce, 0xc8, - 0x3b, 0xb8, 0xf3, 0xae, 0x3b, 0x88, 0x1e, 0x4b, 0xdf, 0xeb, 0xbb, 0x6e, 0x3b, 0xdc, 0x6f, 0x87, - 0x29, 0x86, 0x0a, 0x14, 0xc1, 0x87, 0x47, 0x60, 0xce, 0x27, 0x66, 0x13, 0x53, 0x5b, 0x3d, 0xd0, - 0x59, 0x99, 0xe6, 0xb6, 0x78, 0x28, 0x51, 0x54, 0x71, 0xd2, 0x29, 0xde, 0x48, 0x2f, 0xdb, 0xfa, - 0x1e, 0xf1, 0x19, 0x65, 0x9c, 0x38, 0x3c, 0xa0, 0x4e, 0xcc, 0x07, 0xc5, 0xb1, 0xc5, 0x13, 0x60, - 0x8b, 0xd5, 0xe5, 0x91, 0xc7, 0xa9, 0xeb, 0xb0, 0xfc, 0x44, 0xf8, 0x04, 0x54, 0x23, 0x72, 0x14, - 0xb3, 0x82, 0x3b, 0x60, 0x59, 0x4c, 0xeb, 0x8f, 0x83, 0x00, 0xdb, 0x2f, 0x3d, 0xec, 0x88, 0xab, - 0xca, 0x4f, 0xca, 0x57, 0x39, 0x2f, 0xf6, 0x9c, 0x8d, 0x01, 0x7a, 0x34, 0xd0, 0x0b, 0x3e, 0x05, - 0x8b, 0xc1, 0xa2, 0x63, 0x50, 0xc7, 0xa2, 0x4e, 0x43, 0xac, 0x39, 0xf2, 0x81, 0x9f, 0x31, 0xae, - 0x8a, 0xde, 0x78, 0x92, 0x54, 0x9e, 0x0c, 0x12, 0xa2, 0x34, 0x08, 0x7c, 0x01, 0x16, 0x65, 0x44, - 0x62, 0xa9, 0xc1, 0x42, 0x09, 0xcb, 0x4f, 0xa7, 0xb7, 0x14, 0x71, 0x75, 0x82, 0x48, 0xbd, 0xf1, - 0xd3, 0x1b, 0x53, 0xfb, 0xc4, 0xb7, 0x8d, 0x7f, 0xab, 0x7a, 0x2d, 0x6e, 0x24, 0xa1, 0x50, 0x1a, - 0x7d, 0xe5, 0x1e, 0x98, 0x4f, 0x14, 0x1c, 0x2e, 0x80, 0xcc, 0x11, 0x69, 0x07, 0xef, 0x35, 0x12, - 0x7f, 0xc2, 0x65, 0x30, 0x71, 0x8c, 0x9b, 0x2d, 0x12, 0x30, 0x10, 0x05, 0x1f, 0x77, 0xc7, 0xef, - 0x68, 0xa5, 0x1f, 0x35, 0x10, 0x1b, 0x6c, 0x23, 0x68, 0xee, 0x6a, 0xbc, 0xb9, 0xd7, 0xce, 0x4a, - 0xec, 0x21, 0x6d, 0xfd, 0x99, 0x06, 0x66, 0xa3, 0xfb, 0x1c, 0xbc, 0x06, 0xa6, 0x71, 0xcb, 0xa2, - 0xc4, 0x31, 0x7b, 0x3b, 0x4b, 0x3f, 0x9b, 0x0d, 0x25, 0x47, 0x7d, 0x0b, 0xb1, 0xed, 0x91, 0x97, - 0x1e, 0xf5, 0xb1, 0x60, 0x5a, 0x8d, 0x98, 0xae, 0x63, 0x31, 0x79, 0x4d, 0x99, 0x60, 0x50, 0x6e, - 0x27, 0x95, 0x28, 0x6d, 0x5f, 0xfa, 0x66, 0x1c, 0x2c, 0x04, 0x04, 0x09, 0x96, 0x7d, 0x9b, 0x38, - 0x7c, 0x04, 0xe3, 0x05, 0xc5, 0xd6, 0xbe, 0x1b, 0xa7, 0xaf, 0x44, 0x61, 0x76, 0xc3, 0xf6, 0x3f, - 0xf8, 0x0c, 0x4c, 0x32, 0x8e, 0x79, 0x8b, 0xc9, 0xe7, 0x2f, 0xb7, 0xbe, 0x7e, 0x2e, 0x54, 0xe9, - 0x19, 0xee, 0x7f, 0xc1, 0x37, 0x52, 0x88, 0xa5, 0x9f, 0x34, 0xb0, 0x9c, 0x74, 0x19, 0x01, 0xe1, - 0x1e, 0xc7, 0x09, 0x77, 0xed, 0x3c, 0x27, 0x1a, 0x42, 0xba, 0x5f, 0x35, 0x70, 0x29, 0x75, 0x78, - 0xf9, 0xce, 0x8a, 0x59, 0xe5, 0x25, 0x26, 0xe2, 0x6e, 0xb8, 0x3e, 0xcb, 0x59, 0xb5, 0x37, 0x40, - 0x8f, 0x06, 0x7a, 0xc1, 0xe7, 0x60, 0x81, 0x3a, 0x4d, 0xea, 0x10, 0xf5, 0x2c, 0x87, 0xe5, 0x1e, - 0x38, 0x50, 0x92, 0xc8, 0xb2, 0xcc, 0xcb, 0x62, 0x7b, 0xa9, 0x24, 0x50, 0x50, 0x0a, 0xb7, 0xf4, - 0xcb, 0x80, 0xf2, 0xc8, 0xb5, 0x52, 0x74, 0x94, 0x94, 0x10, 0x3f, 0xd5, 0x51, 0x4a, 0x8e, 0xfa, - 0x16, 0x92, 0x41, 0xf2, 0x2a, 0x54, 0xa2, 0xe7, 0x63, 0x90, 0xf4, 0x8c, 0x30, 0x48, 0x7e, 0x23, - 0x85, 0x28, 0x32, 0x11, 0x6b, 0x5b, 0x64, 0x3d, 0xeb, 0x67, 0xb2, 0xab, 0xe4, 0xa8, 0x6f, 0x51, - 0xfa, 0x2b, 0x33, 0xa0, 0x4a, 0x92, 0x8a, 0x91, 0x23, 0xf5, 0x7e, 0xab, 0x27, 0x8f, 0x64, 0xf5, - 0x8f, 0x64, 0xc1, 0xaf, 0x34, 0x00, 0x71, 0x1f, 0xa2, 0xda, 0xa3, 0x6a, 0xc0, 0xa7, 0x07, 0xe7, - 0xef, 0x10, 0x7d, 0x23, 0x05, 0x16, 0xbc, 0xd5, 0x2b, 0x2a, 0x09, 0x98, 0x36, 0x40, 0x03, 0x32, - 0x80, 0x14, 0xe4, 0x02, 0xe9, 0xb6, 0xef, 0xbb, 0xbe, 0x6a, 0xd9, 0xcb, 0xa7, 0x27, 0x24, 0xcd, - 0x8d, 0x82, 0xfc, 0x4d, 0x14, 0xfa, 0x9f, 0x74, 0x8a, 0xb9, 0x88, 0x1e, 0x45, 0xb1, 0x45, 0x28, - 0x8b, 0x84, 0xa1, 0xb2, 0xff, 0x20, 0xd4, 0x16, 0x19, 0x1e, 0x2a, 0x82, 0xbd, 0xb2, 0x0d, 0xfe, - 0x35, 0xe4, 0x82, 0xce, 0xf5, 0xb6, 0x7d, 0xae, 0x81, 0x68, 0x0c, 0xb8, 0x03, 0xb2, 0x9c, 0xaa, - 0x4e, 0xcc, 0xad, 0x5f, 0x3d, 0xdb, 0x84, 0xd9, 0xa7, 0x36, 0x09, 0x07, 0xa5, 0xf8, 0x42, 0x12, - 0x05, 0x5e, 
0x01, 0x53, 0x36, 0x61, 0x0c, 0x37, 0x54, 0xe4, 0xf0, 0x07, 0x54, 0x35, 0x10, 0xa3, - 0x9e, 0xbe, 0x74, 0x1b, 0x2c, 0x0d, 0xf8, 0x49, 0x0a, 0x8b, 0x60, 0xc2, 0x94, 0xff, 0xf0, 0x11, - 0x09, 0x4d, 0x18, 0x33, 0x62, 0xca, 0x6c, 0xca, 0xff, 0xf3, 0x04, 0x72, 0xe3, 0xfa, 0xab, 0xb7, - 0x85, 0xb1, 0xd7, 0x6f, 0x0b, 0x63, 0x6f, 0xde, 0x16, 0xc6, 0x3e, 0xed, 0x16, 0xb4, 0x57, 0xdd, - 0x82, 0xf6, 0xba, 0x5b, 0xd0, 0xde, 0x74, 0x0b, 0xda, 0x6f, 0xdd, 0x82, 0xf6, 0xc5, 0xef, 0x85, - 0xb1, 0x67, 0x53, 0xea, 0xbe, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xce, 0xa8, 0xf1, 0x40, 0x9a, - 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_73e4f72503e71065) +} + +var fileDescriptor_73e4f72503e71065 = []byte{ + // 1728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x4f, 0xc7, 0xce, 0x57, 0x39, 0x99, 0x24, 0x35, 0x99, 0xc5, 0xeb, 0x83, 0x1d, 0x19, 0xc1, + 0x66, 0x46, 0x4b, 0x7b, 0x12, 0x96, 0xd5, 0x68, 0xa5, 0x95, 0x48, 0x27, 0x81, 0xf5, 0x6e, 0x9c, + 0xc9, 0x96, 0xa3, 0xd1, 0x6a, 0xc5, 0x81, 0x72, 0xbb, 0xe2, 0xd4, 0xc6, 0xfd, 0xb1, 0x5d, 0xd5, + 0x21, 0xe6, 0x04, 0x17, 0xce, 0x88, 0x03, 0x7f, 0x01, 0xff, 0x02, 0x48, 0x70, 0xe1, 0xc8, 0x48, + 0x48, 0x68, 0xe1, 0xc2, 0x9e, 0x2c, 0xc6, 0xf3, 0x27, 0x20, 0x71, 0x88, 0x38, 0xa0, 0xaa, 0x2e, + 0xf7, 0xb7, 0x27, 0x36, 0x2b, 0xf9, 0xe6, 0x7a, 0x1f, 0xbf, 0x7a, 0x55, 0xef, 0xf7, 0x5e, 0xbd, + 0x36, 0x78, 0x72, 0xfd, 0x8c, 0xe9, 0xd4, 0x69, 0x60, 0x97, 0x36, 0x18, 0x77, 0x3c, 0xdc, 0x23, + 0x8d, 0x9b, 0xfd, 0x0e, 0xe1, 0x78, 0xbf, 0xd1, 0x23, 0x36, 0xf1, 0x30, 0x27, 0x5d, 0xdd, 0xf5, + 0x1c, 0xee, 0xc0, 0x4a, 0x60, 0xab, 0x63, 0x97, 0xea, 0xca, 0x56, 0x57, 0xb6, 0x95, 0xef, 0xf5, + 0x28, 0xbf, 0xf2, 0x3b, 0xba, 0xe9, 0x58, 0x8d, 0x9e, 0xd3, 0x73, 0x1a, 0xd2, 0xa5, 0xe3, 0x5f, + 0xca, 0x95, 0x5c, 0xc8, 0x5f, 0x01, 0x54, 0xa5, 0x1e, 0xdb, 0xd6, 0x74, 0x3c, 0xb1, 0x67, 0x7a, + 0xbb, 0xca, 0x7b, 0x91, 0x8d, 0x85, 0xcd, 0x2b, 0x6a, 0x13, 0x6f, 0xd0, 0x70, 0xaf, 0x7b, 0xd2, + 0xc9, 0x23, 0xcc, 0xf1, 0x3d, 0x93, 0xcc, 0xe4, 0xc5, 0x1a, 0x16, 0xe1, 0x38, 0x6f, 0xaf, 0xc6, + 0x24, 0x2f, 0xcf, 0xb7, 0x39, 0xb5, 0xb2, 0xdb, 0xbc, 0x7f, 0x9f, 0x03, 0x33, 0xaf, 0x88, 0x85, + 0xd3, 0x7e, 0xf5, 0x3f, 0x69, 0x60, 0xed, 0xa8, 0xdd, 0x3c, 0xf6, 0xe8, 0x0d, 0xf1, 0xe0, 0x4f, + 0xc1, 0xaa, 0x88, 0xa8, 0x8b, 0x39, 0x2e, 0x6b, 0xbb, 0xda, 0x5e, 0xe9, 0xe0, 0xa9, 0x1e, 0x5d, + 0x72, 0x08, 0xac, 0xbb, 0xd7, 0x3d, 0x21, 0x60, 0xba, 0xb0, 0xd6, 0x6f, 0xf6, 0xf5, 0xe7, 0x9d, + 0x2f, 0x88, 0xc9, 0x5b, 0x84, 0x63, 0x03, 0xbe, 0x1c, 0xd6, 0x16, 0x46, 0xc3, 0x1a, 0x88, 0x64, + 0x28, 0x44, 0x85, 0x9f, 0x80, 0x22, 0x73, 0x89, 0x59, 0x5e, 0x94, 0xe8, 0x8f, 0xf5, 0xc9, 0x29, + 0xd4, 0xc3, 0xb0, 0xda, 0x2e, 0x31, 0x8d, 0x75, 0x05, 0x5b, 0x14, 0x2b, 0x24, 0x41, 0xea, 0x7f, + 0xd4, 0xc0, 0x46, 0x68, 0x75, 0x4a, 0x19, 0x87, 0x3f, 0xc9, 0x1c, 0x40, 0x9f, 0xee, 0x00, 0xc2, + 0x5b, 0x86, 0xbf, 0xa5, 0xf6, 0x59, 0x1d, 0x4b, 0x62, 0xc1, 0x7f, 0x0c, 0x96, 0x28, 0x27, 0x16, + 0x2b, 0x2f, 0xee, 0x16, 0xf6, 0x4a, 0x07, 0xdf, 0x99, 0x2a, 0x7a, 0x63, 0x43, 0x21, 0x2e, 0x35, + 0x85, 0x2f, 0x0a, 0x20, 0xea, 0xff, 0x2c, 0xc6, 0x62, 0x17, 0x67, 0x82, 0x1f, 0x80, 0x07, 0x98, + 0x73, 0x6c, 0x5e, 0x21, 0xf2, 0xa5, 0x4f, 0x3d, 0xd2, 0x95, 0x27, 0x58, 0x35, 0xe0, 0x68, 0x58, + 0x7b, 0x70, 0x98, 0xd0, 0xa0, 0x94, 0xa5, 0xf0, 0x75, 0x9d, 0x6e, 0xd3, 0xbe, 0x74, 0x9e, 0xdb, + 0x2d, 0xc7, 0xb7, 0xb9, 0xbc, 0x60, 0xe5, 0x7b, 0x9e, 0xd0, 0xa0, 0x94, 0x25, 0x34, 0xc1, 0xce, + 0x8d, 0xd3, 
0xf7, 0x2d, 0x72, 0x4a, 0x2f, 0x89, 0x39, 0x30, 0xfb, 0xa4, 0xe5, 0x74, 0x09, 0x2b, + 0x17, 0x76, 0x0b, 0x7b, 0x6b, 0x46, 0x63, 0x34, 0xac, 0xed, 0xbc, 0xc8, 0xd1, 0xdf, 0x0d, 0x6b, + 0x0f, 0x73, 0xe4, 0x28, 0x17, 0x0c, 0x7e, 0x08, 0x36, 0xd5, 0x0d, 0x1d, 0x61, 0x17, 0x9b, 0x94, + 0x0f, 0xca, 0x45, 0x19, 0xe1, 0xc3, 0xd1, 0xb0, 0xb6, 0xd9, 0x4e, 0xaa, 0x50, 0xda, 0x16, 0x7e, + 0x04, 0x36, 0x2e, 0xd9, 0x8f, 0x3d, 0xc7, 0x77, 0xcf, 0x9d, 0x3e, 0x35, 0x07, 0xe5, 0xa5, 0x5d, + 0x6d, 0x6f, 0xcd, 0xa8, 0x8f, 0x86, 0xb5, 0x8d, 0x1f, 0xb5, 0x63, 0x8a, 0xbb, 0xb4, 0x00, 0x25, + 0x1d, 0x21, 0x01, 0x1b, 0xdc, 0xb9, 0x26, 0xb6, 0xb8, 0x3a, 0xc2, 0x38, 0x2b, 0x2f, 0xcb, 0x5c, + 0xee, 0xbd, 0x29, 0x97, 0x17, 0x31, 0x07, 0xe3, 0x91, 0x4a, 0xe7, 0x46, 0x5c, 0xca, 0x50, 0x12, + 0x15, 0x1e, 0x81, 0x6d, 0x2f, 0x48, 0x0e, 0x43, 0xc4, 0xf5, 0x3b, 0x7d, 0xca, 0xae, 0xca, 0x2b, + 0xf2, 0xc4, 0x8f, 0x46, 0xc3, 0xda, 0x36, 0x4a, 0x2b, 0x51, 0xd6, 0x1e, 0xbe, 0x07, 0xd6, 0x19, + 0x39, 0xa5, 0xb6, 0x7f, 0x1b, 0xe4, 0x74, 0x55, 0xfa, 0x6f, 0x8d, 0x86, 0xb5, 0xf5, 0xf6, 0x49, + 0x24, 0x47, 0x09, 0xab, 0xfa, 0x1f, 0x34, 0xb0, 0x72, 0xd4, 0x6e, 0x9e, 0x39, 0x5d, 0x32, 0x87, + 0x82, 0x6e, 0x26, 0x0a, 0xfa, 0x9d, 0x7b, 0x4a, 0x42, 0x04, 0x35, 0xb1, 0x9c, 0xff, 0x1d, 0x94, + 0xb3, 0xb0, 0x51, 0xfd, 0x68, 0x17, 0x14, 0x6d, 0x6c, 0x11, 0x19, 0xfa, 0x5a, 0xe4, 0x73, 0x86, + 0x2d, 0x82, 0xa4, 0x06, 0x7e, 0x17, 0x2c, 0xdb, 0x4e, 0x97, 0x34, 0x8f, 0x65, 0x00, 0x6b, 0xc6, + 0x03, 0x65, 0xb3, 0x7c, 0x26, 0xa5, 0x48, 0x69, 0xc5, 0x55, 0x72, 0xc7, 0x75, 0xfa, 0x4e, 0x6f, + 0xf0, 0x09, 0x19, 0x8c, 0xc9, 0x2d, 0xaf, 0xf2, 0x22, 0x26, 0x47, 0x09, 0x2b, 0xd8, 0x01, 0x25, + 0xdc, 0xef, 0x3b, 0x26, 0xe6, 0xb8, 0xd3, 0x27, 0x92, 0xb1, 0xa5, 0x83, 0xc6, 0x9b, 0xce, 0x18, + 0x54, 0x84, 0xd8, 0x1c, 0xa9, 0x17, 0x81, 0x19, 0x9b, 0xa3, 0x61, 0xad, 0x74, 0x18, 0xe1, 0xa0, + 0x38, 0x68, 0xfd, 0xf7, 0x1a, 0x28, 0xa9, 0x53, 0xcf, 0xa1, 0x85, 0x7d, 0x94, 0x6c, 0x61, 0xdf, + 0x9e, 0x22, 0x5f, 0x13, 0x1a, 0x98, 0x19, 0x86, 0x2d, 0xbb, 0xd7, 0x05, 0x58, 0xe9, 0xca, 0xa4, + 0xb1, 0xb2, 0x26, 0xa1, 0x1f, 0x4f, 0x01, 0xad, 0x3a, 0xe4, 0xa6, 0xda, 0x60, 0x25, 0x58, 0x33, + 0x34, 0x86, 0xaa, 0xff, 0xa7, 0x00, 0xe0, 0x51, 0xbb, 0x99, 0xea, 0x0f, 0x73, 0xa0, 0x35, 0x05, + 0xeb, 0x82, 0x39, 0x63, 0x6e, 0x28, 0x7a, 0x7f, 0x7f, 0xca, 0x4c, 0xe0, 0x0e, 0xe9, 0xb7, 0x49, + 0x9f, 0x98, 0xdc, 0xf1, 0x02, 0x92, 0x9d, 0xc5, 0xc0, 0x50, 0x02, 0x1a, 0x1e, 0x83, 0xad, 0x71, + 0xbb, 0xeb, 0x63, 0xc6, 0x04, 0xb9, 0xcb, 0x05, 0x49, 0xe6, 0xb2, 0x0a, 0x71, 0xab, 0x9d, 0xd2, + 0xa3, 0x8c, 0x07, 0xfc, 0x0c, 0xac, 0x9a, 0xf1, 0xce, 0x7a, 0x0f, 0x6d, 0xf4, 0xf1, 0xc0, 0xa2, + 0x7f, 0xea, 0x63, 0x9b, 0x53, 0x3e, 0x30, 0xd6, 0x05, 0x65, 0xc2, 0x16, 0x1c, 0xa2, 0x41, 0x06, + 0xb6, 0x2d, 0x7c, 0x4b, 0x2d, 0xdf, 0x0a, 0xc8, 0xdd, 0xa6, 0x3f, 0x27, 0xb2, 0xff, 0xce, 0xbe, + 0x85, 0x6c, 0x7d, 0xad, 0x34, 0x18, 0xca, 0xe2, 0xd7, 0xff, 0xaa, 0x81, 0xb7, 0xb2, 0x89, 0x9f, + 0x43, 0x81, 0xb4, 0x93, 0x05, 0xa2, 0xdf, 0xc3, 0xe2, 0x54, 0x80, 0x13, 0x6a, 0xe5, 0x37, 0xcb, + 0x60, 0x3d, 0x9e, 0xc3, 0x39, 0x10, 0xf8, 0x07, 0xa0, 0xe4, 0x7a, 0xce, 0x0d, 0x65, 0xd4, 0xb1, + 0x89, 0xa7, 0xba, 0xe3, 0x43, 0xe5, 0x52, 0x3a, 0x8f, 0x54, 0x28, 0x6e, 0x07, 0xfb, 0x00, 0xb8, + 0xd8, 0xc3, 0x16, 0xe1, 0xa2, 0x92, 0x0b, 0xf2, 0x0e, 0x9e, 0xbd, 0xe9, 0x0e, 0xe2, 0xc7, 0xd2, + 0xcf, 0x43, 0xd7, 0x13, 0x9b, 0x7b, 0x83, 0x28, 0xc4, 0x48, 0x81, 0x62, 0xf8, 0xf0, 0x1a, 0x6c, + 0x78, 0xc4, 0xec, 0x63, 0x6a, 0xa9, 0x67, 0xbd, 0x28, 0xc3, 0x3c, 0x11, 0xcf, 0x2b, 0x8a, 0x2b, + 0xee, 0x86, 0xb5, 0xa7, 0xd9, 0x11, 
0x5d, 0x3f, 0x27, 0x1e, 0xa3, 0x8c, 0x13, 0x9b, 0x07, 0xd4, + 0x49, 0xf8, 0xa0, 0x24, 0xb6, 0x78, 0x02, 0x2c, 0xf1, 0x40, 0x3e, 0x77, 0x39, 0x75, 0x6c, 0x56, + 0x5e, 0x8a, 0x9e, 0x80, 0x56, 0x4c, 0x8e, 0x12, 0x56, 0xf0, 0x14, 0xec, 0x88, 0x6e, 0xfd, 0xb3, + 0x60, 0x83, 0x93, 0x5b, 0x17, 0xdb, 0xe2, 0xaa, 0xca, 0xcb, 0xf2, 0x2d, 0x2e, 0x8b, 0xe9, 0xe8, + 0x30, 0x47, 0x8f, 0x72, 0xbd, 0xe0, 0x67, 0x60, 0x3b, 0x18, 0x8f, 0x0c, 0x6a, 0x77, 0xa9, 0xdd, + 0x13, 0xc3, 0x91, 0x1c, 0x0b, 0xd6, 0x8c, 0x27, 0xa2, 0x36, 0x5e, 0xa4, 0x95, 0x77, 0x79, 0x42, + 0x94, 0x05, 0x81, 0x5f, 0x82, 0x6d, 0xb9, 0x23, 0xe9, 0xaa, 0xc6, 0x42, 0x09, 0x2b, 0xaf, 0x66, + 0x67, 0x1b, 0x71, 0x75, 0x82, 0x48, 0xe3, 0xf6, 0x33, 0x6e, 0x53, 0x17, 0xc4, 0xb3, 0x8c, 0xb7, + 0x55, 0xbe, 0xb6, 0x0f, 0xd3, 0x50, 0x28, 0x8b, 0x5e, 0xf9, 0x10, 0x6c, 0xa6, 0x12, 0x0e, 0xb7, + 0x40, 0xe1, 0x9a, 0x0c, 0x82, 0xf7, 0x1a, 0x89, 0x9f, 0x70, 0x07, 0x2c, 0xdd, 0xe0, 0xbe, 0x4f, + 0x02, 0x06, 0xa2, 0x60, 0xf1, 0xc1, 0xe2, 0x33, 0xad, 0xfe, 0x67, 0x0d, 0x24, 0x1a, 0xdb, 0x1c, + 0x8a, 0xbb, 0x95, 0x2c, 0xee, 0xbd, 0x69, 0x89, 0x3d, 0xa1, 0xac, 0x7f, 0xa9, 0x81, 0xf5, 0xf8, + 0x14, 0x08, 0xdf, 0x05, 0xab, 0xd8, 0xef, 0x52, 0x62, 0x9b, 0xe3, 0x99, 0x25, 0x8c, 0xe6, 0x50, + 0xc9, 0x51, 0x68, 0x21, 0x66, 0x44, 0x72, 0xeb, 0x52, 0x0f, 0x0b, 0xa6, 0xb5, 0x89, 0xe9, 0xd8, + 0x5d, 0x26, 0xaf, 0xa9, 0x10, 0x34, 0xca, 0x93, 0xb4, 0x12, 0x65, 0xed, 0xeb, 0xbf, 0x5b, 0x04, + 0x5b, 0x01, 0x41, 0x82, 0x4f, 0x04, 0x8b, 0xd8, 0x7c, 0x0e, 0xed, 0x05, 0x25, 0xc6, 0xbe, 0xa7, + 0xf7, 0x8f, 0x44, 0x51, 0x74, 0x93, 0xe6, 0x3f, 0xf8, 0x39, 0x58, 0x66, 0x1c, 0x73, 0x9f, 0xc9, + 0xe7, 0xaf, 0x74, 0x70, 0x30, 0x13, 0xaa, 0xf4, 0x8c, 0xe6, 0xbf, 0x60, 0x8d, 0x14, 0x62, 0xfd, + 0x2f, 0x1a, 0xd8, 0x49, 0xbb, 0xcc, 0x81, 0x70, 0x9f, 0x26, 0x09, 0xf7, 0xee, 0x2c, 0x27, 0x9a, + 0x40, 0xba, 0x7f, 0x68, 0xe0, 0xad, 0xcc, 0xe1, 0xe5, 0x3b, 0x2b, 0x7a, 0x95, 0x9b, 0xea, 0x88, + 0x67, 0xd1, 0xf8, 0x2c, 0x7b, 0xd5, 0x79, 0x8e, 0x1e, 0xe5, 0x7a, 0xc1, 0x2f, 0xc0, 0x16, 0xb5, + 0xfb, 0xd4, 0x26, 0xea, 0x59, 0x8e, 0xd2, 0x9d, 0xdb, 0x50, 0xd2, 0xc8, 0x32, 0xcd, 0x3b, 0x62, + 0x7a, 0x69, 0xa6, 0x50, 0x50, 0x06, 0xb7, 0xfe, 0xb7, 0x9c, 0xf4, 0xc8, 0xb1, 0x52, 0x54, 0x94, + 0x94, 0x10, 0x2f, 0x53, 0x51, 0x4a, 0x8e, 0x42, 0x0b, 0xc9, 0x20, 0x79, 0x15, 0x2a, 0xd0, 0xd9, + 0x18, 0x24, 0x3d, 0x63, 0x0c, 0x92, 0x6b, 0xa4, 0x10, 0x45, 0x24, 0x62, 0x6c, 0x8b, 0x8d, 0x67, + 0x61, 0x24, 0x67, 0x4a, 0x8e, 0x42, 0x8b, 0xfa, 0x7f, 0x0b, 0x39, 0x59, 0x92, 0x54, 0x8c, 0x1d, + 0x69, 0xfc, 0x85, 0x9f, 0x3e, 0x52, 0x37, 0x3c, 0x52, 0x17, 0xfe, 0x56, 0x03, 0x10, 0x87, 0x10, + 0xad, 0x31, 0x55, 0x03, 0x3e, 0x7d, 0x3c, 0x7b, 0x85, 0xe8, 0x87, 0x19, 0xb0, 0xe0, 0xad, 0xae, + 0xa8, 0x20, 0x60, 0xd6, 0x00, 0xe5, 0x44, 0x00, 0x29, 0x28, 0x05, 0xd2, 0x13, 0xcf, 0x73, 0x3c, + 0x55, 0xb2, 0xef, 0xdc, 0x1f, 0x90, 0x34, 0x37, 0xaa, 0xf2, 0x9b, 0x28, 0xf2, 0xbf, 0x1b, 0xd6, + 0x4a, 0x31, 0x3d, 0x8a, 0x63, 0x8b, 0xad, 0xba, 0x24, 0xda, 0xaa, 0xf8, 0x7f, 0x6c, 0x75, 0x4c, + 0x26, 0x6f, 0x15, 0xc3, 0xae, 0x9c, 0x80, 0x6f, 0x4d, 0xb8, 0xa0, 0x99, 0xde, 0xb6, 0xd7, 0x8b, + 0xe0, 0x51, 0x78, 0xff, 0x1e, 0xed, 0xf8, 0x9c, 0xb0, 0x79, 0x4d, 0x7e, 0x07, 0x00, 0x04, 0x9f, + 0x4f, 0x92, 0xaa, 0xc1, 0xe0, 0x17, 0x7a, 0x1c, 0x87, 0x1a, 0x14, 0xb3, 0x82, 0x7e, 0xce, 0xd8, + 0x77, 0x38, 0x15, 0xb9, 0xe2, 0x87, 0x9b, 0x75, 0xfe, 0xfb, 0xa6, 0x13, 0xc4, 0xdf, 0x35, 0xf0, + 0x76, 0x6e, 0x20, 0x73, 0xe8, 0xec, 0x2f, 0x92, 0x9d, 0x7d, 0x7f, 0xe6, 0xcb, 0x9a, 0xd0, 0xde, + 0x7f, 0xa5, 0x81, 0x38, 0x3b, 0xe1, 0x29, 0x28, 0x72, 0xaa, 
0x7a, 0x78, 0xe9, 0xe0, 0xc9, 0x74, + 0x27, 0xb8, 0xa0, 0x16, 0x89, 0x9e, 0x58, 0xb1, 0x42, 0x12, 0x05, 0x3e, 0x06, 0x2b, 0x16, 0x61, + 0x0c, 0xf7, 0xc6, 0xc4, 0x08, 0x3f, 0xbd, 0x5b, 0x81, 0x18, 0x8d, 0xf5, 0xf5, 0xf7, 0xc1, 0xc3, + 0x9c, 0x3f, 0x33, 0x60, 0x0d, 0x2c, 0x99, 0xf2, 0xcf, 0x28, 0x11, 0xd0, 0x92, 0xb1, 0x26, 0x0e, + 0x70, 0x24, 0xff, 0x85, 0x0a, 0xe4, 0xc6, 0x0f, 0x5f, 0xbe, 0xaa, 0x2e, 0x7c, 0xf5, 0xaa, 0xba, + 0xf0, 0xf5, 0xab, 0xea, 0xc2, 0x2f, 0x46, 0x55, 0xed, 0xe5, 0xa8, 0xaa, 0x7d, 0x35, 0xaa, 0x6a, + 0x5f, 0x8f, 0xaa, 0xda, 0xbf, 0x46, 0x55, 0xed, 0xd7, 0xaf, 0xab, 0x0b, 0x9f, 0x57, 0x26, 0xff, + 0xcf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x30, 0xdb, 0x24, 0x04, 0x18, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -826,6 +889,16 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SELinuxMount != nil { + i-- + if *m.SELinuxMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } if m.RequiresRepublish != nil { i-- if *m.RequiresRepublish { @@ -1654,6 +1727,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1795,6 +1977,9 @@ func (m *CSIDriverSpec) Size() (n int) { if m.RequiresRepublish != nil { n += 2 } + if m.SELinuxMount != nil { + n += 2 + } return n } @@ -2073,6 +2258,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } +func (m *VolumeAttributesClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *VolumeAttributesClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -2148,6 +2371,7 @@ func (this *CSIDriverSpec) String() string { `FSGroupPolicy:` + valueToStringGenerated(this.FSGroupPolicy) + `,`, `TokenRequests:` + repeatedStringForTokenRequests + `,`, `RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`, + `SELinuxMount:` + valueToStringGenerated(this.SELinuxMount) + `,`, `}`, }, "") return s @@ -2370,6 +2594,44 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } +func (this *VolumeAttributesClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&VolumeAttributesClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttributesClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttributesClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttributesClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -2844,6 +3106,27 @@ func (m *CSIDriverSpec) 
Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.RequiresRepublish = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SELinuxMount = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5119,6 +5402,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttributesClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index bfe8280d5..dfef3f6cc 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -28,7 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1beta1"; +option go_package = "k8s.io/api/storage/v1beta1"; // CSIDriver captures information about a Container Storage Interface (CSI) // volume driver deployed on the cluster. @@ -47,9 +47,9 @@ message CSIDriver { // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; } @@ -58,7 +58,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -82,17 +82,16 @@ message CSIDriverSpec { // +optional optional bool attachRequired = 1; - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. 
- // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace @@ -112,14 +111,14 @@ message CSIDriverSpec { // +optional optional bool podInfoOnMount = 2; - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. + // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -128,12 +127,12 @@ message CSIDriverSpec { // This field is immutable. // // +optional + // +listType=atomic repeated string volumeLifecycleModes = 3; - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -146,14 +145,10 @@ message CSIDriverSpec { // // This field was immutable in Kubernetes <= 1.22 and now is mutable. // - // This is a beta field and only available when the CSIStorageCapacity - // feature is enabled. The default is false. - // // +optional - // +featureGate=CSIStorageCapacity optional bool storageCapacity = 4; - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. 
// @@ -163,10 +158,11 @@ message CSIDriverSpec { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional optional string fsGroupPolicy = 5; - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -186,7 +182,7 @@ message CSIDriverSpec { // +listType=atomic repeated TokenRequest tokenRequests = 6; - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -196,6 +192,28 @@ message CSIDriverSpec { // // +optional optional bool requiresRepublish = 7; + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. + // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". + // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + optional bool seLinuxMount = 8; } // DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. @@ -211,7 +229,7 @@ message CSIDriverSpec { // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -219,7 +237,7 @@ message CSINode { // CSINodeDriver holds information about the specification of one CSI driver installed on a node message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. optional string name = 1; @@ -246,6 +264,7 @@ message CSINodeDriver { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic repeated string topologyKeys = 3; // allocatable represents the volume resources of a node that are available for scheduling. 
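For context on what this vendor bump adds to storage/v1beta1: CSIDriverSpec gains a seLinuxMount boolean (field 8) and the package gains a VolumeAttributesClass type (ObjectMeta, driverName, parameters). Below is a minimal, illustrative Go sketch, not part of the vendored patch, of how a consumer of the updated k8s.io/api module might construct these objects; the driver name, class name, and parameter key are hypothetical.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical CSI driver name, used purely for illustration.
	seLinuxMount := true
	driver := storagev1beta1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "example.csi.vendor.io"},
		Spec: storagev1beta1.CSIDriverSpec{
			// New in this vendored version: declares support for "-o context" SELinux mounts.
			SELinuxMount: &seLinuxMount,
		},
	}

	// VolumeAttributesClass as added to storage/v1beta1 by this dependency update.
	class := storagev1beta1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "fast-io"},
		DriverName: "example.csi.vendor.io",
		// Parameters are opaque to Kubernetes and passed through to the CSI driver.
		Parameters: map[string]string{"iops": "5000"},
	}

	fmt.Println(driver.Name, *driver.Spec.SELinuxMount, class.Name, class.Parameters)
}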
@@ -258,7 +277,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -270,6 +289,8 @@ message CSINodeSpec { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated CSINodeDriver drivers = 1; } @@ -290,9 +311,13 @@ message CSINodeSpec { // // The producer of these objects can decide which approach is more suitable. // -// They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate -// is enabled there and a CSI driver opts into capacity-aware scheduling with -// CSIDriver.StorageCapacity. +// They are consumed by the kube-scheduler when a CSI driver opts into +// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler +// compares the MaximumVolumeSize against the requested size of pending volumes +// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back +// to a comparison against the less precise Capacity. If that is also unset, +// the scheduler assumes that capacity is insufficient and tries some other +// node. message CSIStorageCapacity { // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that @@ -304,18 +329,18 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -323,19 +348,19 @@ message CSIStorageCapacity { // This field is immutable. optional string storageClassName = 3; - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // // The semantic is currently (CSI spec 1.2) defined as: // The available capacity, in bytes, of the storage that can be used // to provision volumes. If not set, that information is currently - // unavailable and treated like zero capacity. + // unavailable. 
// // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -347,7 +372,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -355,11 +380,9 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name + // items is the list of CSIStorageCapacity objects. repeated CSIStorageCapacity items = 2; } @@ -372,44 +395,45 @@ message StorageClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. optional string provisioner = 2; - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional map parameters = 3; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. // +optional optional string reclaimPolicy = 4; - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic repeated string mountOptions = 5; - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional optional string volumeBindingMode = 7; - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. 
// Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -417,19 +441,19 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of StorageClasses + // items is the list of StorageClasses repeated StorageClass items = 2; } // TokenRequest contains parameters of a service account token. message TokenRequest { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. optional string audience = 1; - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -444,13 +468,13 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. optional VolumeAttachmentSpec spec = 2; - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -462,9 +486,9 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } @@ -473,7 +497,7 @@ message VolumeAttachmentList { // in future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; @@ -484,57 +508,97 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. 
message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). optional string attacher = 1; - // Source represents the volume that should be attached. + // source represents the volume that should be attached. optional VolumeAttachmentSource source = 2; - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. optional string nodeName = 3; } // VolumeAttachmentStatus is the status of a VolumeAttachment request. message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. optional bool attached = 1; - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional map attachmentMetadata = 2; - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional optional VolumeError attachError = 3; - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional optional VolumeError detachError = 4; } +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +message VolumeAttributesClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Name of the CSI driver + // This field is immutable. + optional string driverName = 2; + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. 
+ map parameters = 3; +} + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +message VolumeAttributesClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of VolumeAttributesClass objects. + repeated VolumeAttributesClass items = 2; +} + // VolumeError captures an error encountered during a volume operation. message VolumeError { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -543,7 +607,7 @@ message VolumeError { // VolumeNodeResources is a set of resource limits for scheduling of volumes. message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index a281d0f26..e2214ef2f 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -58,6 +58,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CSIStorageCapacity{}, &CSIStorageCapacityList{}, + + &VolumeAttributesClass{}, + &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 524d8b534..ce294e3db 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -36,41 +36,43 @@ import ( // according to etcd is in ObjectMeta.Name. type StorageClass struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Provisioner indicates the type of the provisioner. + // provisioner indicates the type of the provisioner. Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - // Parameters holds the parameters for the provisioner that should + // parameters holds the parameters for the provisioner that should // create volumes of this storage class. // +optional Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. + // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. + // Defaults to Delete. 
// +optional ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. + // e.g. ["ro", "soft"]. Not validated - // mount of the PVs will simply fail if one is invalid. // +optional + // +listType=atomic MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - // AllowVolumeExpansion shows whether the storage class allow volume expand + // allowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - // VolumeBindingMode indicates how PersistentVolumeClaims should be + // volumeBindingMode indicates how PersistentVolumeClaims should be // provisioned and bound. When unset, VolumeBindingImmediate is used. // This field is only honored by servers that enable the VolumeScheduling feature. // +optional VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - // Restrict the node topologies where volumes can be dynamically provisioned. + // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. // Each volume plugin defines its own supported topology specifications. // An empty TopologySelectorTerm list means there is no topology restriction. // This field is only honored by servers that enable the VolumeScheduling feature. @@ -87,12 +89,13 @@ type StorageClass struct { // StorageClassList is a collection of storage classes. type StorageClassList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of StorageClasses + // items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -130,11 +133,11 @@ type VolumeAttachment struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the desired attach/detach volume behavior. + // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status of the VolumeAttachment request. + // status represents status of the VolumeAttachment request. // Populated by the entity completing the attach or detach // operation, i.e. the external-attacher. // +optional @@ -149,25 +152,26 @@ type VolumeAttachment struct { // VolumeAttachmentList is a collection of VolumeAttachment objects. 
type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of VolumeAttachments + // items is the list of VolumeAttachments Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this + // attacher indicates the name of the volume driver that MUST handle this // request. This is the name returned by GetPluginName(). Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - // Source represents the volume that should be attached. + // source represents the volume that should be attached. Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - // The node that the volume should be attached to. + // nodeName represents the node that the volume should be attached to. NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` } @@ -176,7 +180,7 @@ type VolumeAttachmentSpec struct { // in future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. + // persistentVolumeName represents the name of the persistent volume to attach. // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` @@ -192,26 +196,26 @@ type VolumeAttachmentSource struct { // VolumeAttachmentStatus is the status of a VolumeAttachment request. type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. + // attached indicates the volume is successfully attached. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed + // attachmentMetadata is populated with any + // information returned by the attach operation, upon successful attach, that must be passed // into subsequent WaitForAttach or Mount calls. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - // The last error encountered during attach operation, if any. + // attachError represents the last error encountered during attach operation, if any. // This field must only be set by the entity completing the attach // operation, i.e. the external-attacher. // +optional AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - // The last error encountered during detach operation, if any. + // detachError represents the last error encountered during detach operation, if any. // This field must only be set by the entity completing the detach // operation, i.e. the external-attacher. // +optional @@ -220,11 +224,11 @@ type VolumeAttachmentStatus struct { // VolumeError captures an error encountered during a volume operation. 
type VolumeError struct { - // Time the error was encountered. + // time represents the time the error was encountered. // +optional Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - // String detailing the error encountered during Attach or Detach operation. + // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive // information. // +optional @@ -259,7 +263,7 @@ type CSIDriver struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Specification of the CSI Driver. + // spec represents the specification of the CSI Driver. Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } @@ -299,17 +303,16 @@ type CSIDriverSpec struct { // +optional AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. + // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) + // during mount operations, if set to true. // If set to false, pod information will not be passed on mount. // Default is false. + // // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. + // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. + // + // The following VolumeContext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace @@ -329,14 +332,14 @@ type CSIDriverSpec struct { // +optional PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage defined by the + // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. + // + // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec + // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. + // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
+ // // For more information about implementing this mode, see // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html // A driver can support one or more of these modes and @@ -345,13 +348,12 @@ type CSIDriverSpec struct { // This field is immutable. // // +optional + // +listType=atomic VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` - // If set to true, storageCapacity indicates that the CSI - // volume driver wants pod scheduling to consider the storage + // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage // capacity that the driver deployment will report by creating - // CSIStorageCapacity objects with capacity information. - // + // CSIStorageCapacity objects with capacity information, if set to true. // // The check can be enabled immediately when deploying a driver. // In that case, provisioning new volumes with late binding @@ -364,14 +366,10 @@ type CSIDriverSpec struct { // // This field was immutable in Kubernetes <= 1.22 and now is mutable. // - // This is a beta field and only available when the CSIStorageCapacity - // feature is enabled. The default is false. - // // +optional - // +featureGate=CSIStorageCapacity StorageCapacity *bool `json:"storageCapacity,omitempty" protobuf:"bytes,4,opt,name=storageCapacity"` - // Defines if the underlying volume supports changing ownership and + // fsGroupPolicy defines if the underlying volume supports changing ownership and // permission of the volume before being mounted. // Refer to the specific FSGroupPolicy values for additional details. // @@ -381,10 +379,11 @@ type CSIDriverSpec struct { // to determine if Kubernetes should modify ownership and permissions of the volume. // With the default policy the defined fsGroup will only be applied // if a fstype is defined and the volume's access mode contains ReadWriteOnce. + // // +optional FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"` - // TokenRequests indicates the CSI driver needs pods' service account + // tokenRequests indicates the CSI driver needs pods' service account // tokens it is mounting volume for to do necessary authentication. Kubelet // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. // The CSI driver should parse and validate the following VolumeContext: @@ -404,7 +403,7 @@ type CSIDriverSpec struct { // +listType=atomic TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"` - // RequiresRepublish indicates the CSI driver wants `NodePublishVolume` + // requiresRepublish indicates the CSI driver wants `NodePublishVolume` // being periodically called to reflect any possible change in the mounted // volume. This field defaults to false. // @@ -414,6 +413,28 @@ type CSIDriverSpec struct { // // +optional RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"` + + // seLinuxMount specifies if the CSI driver supports "-o context" + // mount option. + // + // When "true", the CSI driver must ensure that all volumes provided by this CSI + // driver can be mounted separately with different `-o context` options. This is + // typical for storage backends that provide volumes as filesystems on block + // devices or as independent shared volumes. 
+ // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount + // option when mounting a ReadWriteOncePod volume used in Pod that has + // explicitly set SELinux context. In the future, it may be expanded to other + // volume AccessModes. In any case, Kubernetes will ensure that the volume is + // mounted only with a single SELinux context. + // + // When "false", Kubernetes won't pass any special SELinux mount options to the driver. + // This is typical for volumes that represent subdirectories of a bigger shared filesystem. + // + // Default is "false". + // + // +featureGate=SELinuxMountReadWriteOncePod + // +optional + SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"` } // FSGroupPolicy specifies if a CSI Driver supports modifying @@ -449,12 +470,11 @@ type VolumeLifecycleMode string // TokenRequest contains parameters of a service account token. type TokenRequest struct { - // Audience is the intended audience of the token in "TokenRequestSpec". + // audience is the intended audience of the token in "TokenRequestSpec". // It will default to the audiences of kube apiserver. - // Audience string `json:"audience" protobuf:"bytes,1,opt,name=audience"` - // ExpirationSeconds is the duration of validity of the token in "TokenRequestSpec". + // expirationSeconds is the duration of validity of the token in "TokenRequestSpec". // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec" // // +optional @@ -517,12 +537,14 @@ type CSINodeSpec struct { // If all drivers in the list are uninstalled, this can become empty. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` } // CSINodeDriver holds information about the specification of one CSI driver installed on a node type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. + // name represents the name of the CSI driver that this object refers to. // This MUST be the same name returned by the CSI GetPluginName() call for // that driver. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` @@ -549,6 +571,7 @@ type CSINodeDriver struct { // It is possible for different nodes to use different topology keys. // This can be empty if driver does not support topology. // +optional + // +listType=atomic TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` // allocatable represents the volume resources of a node that are available for scheduling. @@ -558,7 +581,7 @@ type CSINodeDriver struct { // VolumeNodeResources is a set of resource limits for scheduling of volumes. type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. // A volume that is both attached and mounted on a node is considered to be used once, not twice. // The same rule applies for a unique volume that is shared among multiple pods on the same node. // If this field is nil, then the supported number of volumes on this node is unbounded. 
@@ -588,6 +611,8 @@ type CSINodeList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.21 +// +k8s:prerelease-lifecycle-gen:deprecated=1.24 +// +k8s:prerelease-lifecycle-gen:replacement=storage.k8s.io,v1,CSIStorageCapacity // CSIStorageCapacity stores the result of one CSI GetCapacity call. // For a given StorageClass, this describes the available capacity in a @@ -606,11 +631,16 @@ type CSINodeList struct { // // The producer of these objects can decide which approach is more suitable. // -// They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate -// is enabled there and a CSI driver opts into capacity-aware scheduling with -// CSIDriver.StorageCapacity. +// They are consumed by the kube-scheduler when a CSI driver opts into +// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler +// compares the MaximumVolumeSize against the requested size of pending volumes +// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back +// to a comparison against the less precise Capacity. If that is also unset, +// the scheduler assumes that capacity is insufficient and tries some other +// node. type CSIStorageCapacity struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. The name has no particular meaning. It must be // be a DNS subdomain (dots allowed, 253 characters). To ensure that // there are no conflicts with other CSI drivers on the cluster, the recommendation @@ -623,7 +653,7 @@ type CSIStorageCapacity struct { // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NodeTopology defines which nodes have access to the storage + // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is // not accessible from any node in the cluster. If empty, the // storage is accessible from all nodes. This field is @@ -632,7 +662,7 @@ type CSIStorageCapacity struct { // +optional NodeTopology *metav1.LabelSelector `json:"nodeTopology,omitempty" protobuf:"bytes,2,opt,name=nodeTopology"` - // The name of the StorageClass that the reported capacity applies to. + // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass // object (non-empty, DNS subdomain). If that object no longer exists, // the CSIStorageCapacity object is obsolete and should be removed by its @@ -640,19 +670,19 @@ type CSIStorageCapacity struct { // This field is immutable. StorageClassName string `json:"storageClassName" protobuf:"bytes,3,name=storageClassName"` - // Capacity is the value reported by the CSI driver in its GetCapacityResponse + // capacity is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // // The semantic is currently (CSI spec 1.2) defined as: // The available capacity, in bytes, of the storage that can be used // to provision volumes. If not set, that information is currently - // unavailable and treated like zero capacity. + // unavailable. 
// // +optional Capacity *resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,4,opt,name=capacity"` - // MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse + // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the // previous fields. // @@ -669,17 +699,70 @@ type CSIStorageCapacity struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.21 +// +k8s:prerelease-lifecycle-gen:deprecated=1.24 +// +k8s:prerelease-lifecycle-gen:replacement=storage.k8s.io,v1,CSIStorageCapacityList // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { metav1.TypeMeta `json:",inline"` + // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of CSIStorageCapacity objects. - // +listType=map - // +listMapKey=name + // items is the list of CSIStorageCapacity objects. Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +type VolumeAttributesClass struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Name of the CSI driver + // This field is immutable. + DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. 
+type VolumeAttributesClassList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of VolumeAttributesClass objects. + Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index f4dbf0fef..8c1a66350 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -24,13 +24,13 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CSIDriver = map[string]string{ "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", + "spec": "spec represents the specification of the CSI Driver.", } func (CSIDriver) SwaggerDoc() map[string]string { @@ -50,12 +50,13 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. 
The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", - "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", - "storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.", - "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", - "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. 
The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", - "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", + "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. 
In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", + "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", } func (CSIDriverSpec) SwaggerDoc() map[string]string { @@ -74,7 +75,7 @@ func (CSINode) SwaggerDoc() map[string]string { var map_CSINodeDriver = map[string]string{ "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "name": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", "nodeID": "nodeID of the node from the driver point of view. 
This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", @@ -104,12 +105,12 @@ func (CSINodeSpec) SwaggerDoc() map[string]string { } var map_CSIStorageCapacity = map[string]string{ - "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.", + "": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. 
If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", "metadata": "Standard object's metadata. The name has no particular meaning. It must be be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "nodeTopology": "NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", - "storageClassName": "The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", - "capacity": "Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.", - "maximumVolumeSize": "MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "nodeTopology": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "storageClassName": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "capacity": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. 
If not set, that information is currently unavailable.", + "maximumVolumeSize": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", } func (CSIStorageCapacity) SwaggerDoc() map[string]string { @@ -119,7 +120,7 @@ func (CSIStorageCapacity) SwaggerDoc() map[string]string { var map_CSIStorageCapacityList = map[string]string{ "": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of CSIStorageCapacity objects.", + "items": "items is the list of CSIStorageCapacity objects.", } func (CSIStorageCapacityList) SwaggerDoc() map[string]string { @@ -129,13 +130,13 @@ func (CSIStorageCapacityList) SwaggerDoc() map[string]string { var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "provisioner": "provisioner indicates the type of the provisioner.", + "parameters": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "mountOptions": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "allowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { @@ -145,7 +146,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", + "items": "items is the list of StorageClasses", } func (StorageClassList) SwaggerDoc() map[string]string { @@ -154,8 +155,8 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_TokenRequest = map[string]string{ "": "TokenRequest contains parameters of a service account token.", - "audience": "Audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", - "expirationSeconds": "ExpirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", + "audience": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "expirationSeconds": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\"", } func (TokenRequest) SwaggerDoc() map[string]string { @@ -165,8 +166,8 @@ func (TokenRequest) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "spec": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. 
the external-attacher.", } func (VolumeAttachment) SwaggerDoc() map[string]string { @@ -176,7 +177,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", + "items": "items is the list of VolumeAttachments", } func (VolumeAttachmentList) SwaggerDoc() map[string]string { @@ -185,7 +186,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { var map_VolumeAttachmentSource = map[string]string{ "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", + "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } func (VolumeAttachmentSource) SwaggerDoc() map[string]string { @@ -194,9 +195,9 @@ func (VolumeAttachmentSource) SwaggerDoc() map[string]string { var map_VolumeAttachmentSpec = map[string]string{ "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", + "attacher": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "source represents the volume that should be attached.", + "nodeName": "nodeName represents the node that the volume should be attached to.", } func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { @@ -205,20 +206,41 @@ func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { var map_VolumeAttachmentStatus = map[string]string{ "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "attached": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. 
the external-attacher.", + "attachError": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", } func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } +var map_VolumeAttributesClass = map[string]string{ + "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "driverName": "Name of the CSI driver This field is immutable.", + "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", +} + +func (VolumeAttributesClass) SwaggerDoc() map[string]string { + return map_VolumeAttributesClass +} + +var map_VolumeAttributesClassList = map[string]string{ + "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of VolumeAttributesClass objects.", +} + +func (VolumeAttributesClassList) SwaggerDoc() map[string]string { + return map_VolumeAttributesClassList +} + var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "time": "time represents the time the error was encountered.", + "message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { @@ -227,7 +249,7 @@ func (VolumeError) SwaggerDoc() map[string]string { var map_VolumeNodeResources = map[string]string{ "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. 
The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", + "count": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", } func (VolumeNodeResources) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 5411ed8c0..d87aa6b90 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -127,6 +127,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { *out = new(bool) **out = **in } + if in.SELinuxMount != nil { + in, out := &in.SELinuxMount, &out.SELinuxMount + *out = new(bool) + **out = **in + } return } @@ -574,6 +579,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { + if in == nil { + return nil + } + out := new(VolumeAttributesClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttributesClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. +func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { + if in == nil { + return nil + } + out := new(VolumeAttributesClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go index 7b69b2cbc..4be57dc0d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go @@ -133,6 +133,12 @@ func (in *CSIStorageCapacity) APILifecycleDeprecated() (major, minor int) { return 1, 24 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *CSIStorageCapacity) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "CSIStorageCapacity"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *CSIStorageCapacity) APILifecycleRemoved() (major, minor int) { @@ -151,6 +157,12 @@ func (in *CSIStorageCapacityList) APILifecycleDeprecated() (major, minor int) { return 1, 24 } +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *CSIStorageCapacityList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "CSIStorageCapacityList"} +} + // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *CSIStorageCapacityList) APILifecycleRemoved() (major, minor int) { @@ -252,3 +264,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
+func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go b/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go new file mode 100644 index 000000000..192f9ff3c --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=storagemigration.k8s.io + +package v1alpha1 // import "k8s.io/api/storagemigration/v1alpha1" diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go new file mode 100644 index 000000000..ed57f34b5 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go @@ -0,0 +1,1688 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/api/storagemigration/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{0} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo + +func (m *MigrationCondition) Reset() { *m = MigrationCondition{} } +func (*MigrationCondition) ProtoMessage() {} +func (*MigrationCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{1} +} +func (m *MigrationCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MigrationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MigrationCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_MigrationCondition.Merge(m, src) +} +func (m *MigrationCondition) XXX_Size() int { + return m.Size() +} +func (m *MigrationCondition) XXX_DiscardUnknown() { + xxx_messageInfo_MigrationCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_MigrationCondition proto.InternalMessageInfo + +func (m *StorageVersionMigration) Reset() { *m = StorageVersionMigration{} } +func (*StorageVersionMigration) ProtoMessage() {} +func (*StorageVersionMigration) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{2} +} +func (m *StorageVersionMigration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigration) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigration.Merge(m, src) +} +func (m *StorageVersionMigration) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigration) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigration.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigration proto.InternalMessageInfo + +func (m *StorageVersionMigrationList) Reset() 
{ *m = StorageVersionMigrationList{} } +func (*StorageVersionMigrationList) ProtoMessage() {} +func (*StorageVersionMigrationList) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{3} +} +func (m *StorageVersionMigrationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigrationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigrationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigrationList.Merge(m, src) +} +func (m *StorageVersionMigrationList) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigrationList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigrationList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigrationList proto.InternalMessageInfo + +func (m *StorageVersionMigrationSpec) Reset() { *m = StorageVersionMigrationSpec{} } +func (*StorageVersionMigrationSpec) ProtoMessage() {} +func (*StorageVersionMigrationSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{4} +} +func (m *StorageVersionMigrationSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigrationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigrationSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigrationSpec.Merge(m, src) +} +func (m *StorageVersionMigrationSpec) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigrationSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigrationSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigrationSpec proto.InternalMessageInfo + +func (m *StorageVersionMigrationStatus) Reset() { *m = StorageVersionMigrationStatus{} } +func (*StorageVersionMigrationStatus) ProtoMessage() {} +func (*StorageVersionMigrationStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_0117377a57b172b9, []int{5} +} +func (m *StorageVersionMigrationStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageVersionMigrationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageVersionMigrationStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageVersionMigrationStatus.Merge(m, src) +} +func (m *StorageVersionMigrationStatus) XXX_Size() int { + return m.Size() +} +func (m *StorageVersionMigrationStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StorageVersionMigrationStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageVersionMigrationStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.api.storagemigration.v1alpha1.GroupVersionResource") + proto.RegisterType((*MigrationCondition)(nil), "k8s.io.api.storagemigration.v1alpha1.MigrationCondition") + proto.RegisterType((*StorageVersionMigration)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigration") + proto.RegisterType((*StorageVersionMigrationList)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationList") + proto.RegisterType((*StorageVersionMigrationSpec)(nil), 
"k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationSpec") + proto.RegisterType((*StorageVersionMigrationStatus)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationStatus") +} + +func init() { + proto.RegisterFile("k8s.io/api/storagemigration/v1alpha1/generated.proto", fileDescriptor_0117377a57b172b9) +} + +var fileDescriptor_0117377a57b172b9 = []byte{ + // 719 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x4f, 0x13, 0x4f, + 0x14, 0xef, 0x42, 0x0b, 0x7c, 0xa7, 0x5f, 0xc0, 0x4c, 0x14, 0x1a, 0x8c, 0x5b, 0x53, 0x09, 0x41, + 0xa3, 0xb3, 0xd2, 0x10, 0x43, 0x30, 0x1e, 0x28, 0x07, 0xa3, 0x81, 0x98, 0x0c, 0xc8, 0xc1, 0x78, + 0x70, 0xba, 0x1d, 0xb7, 0x43, 0xd9, 0x9d, 0xcd, 0xce, 0x6c, 0x13, 0x6e, 0xfe, 0x09, 0x1e, 0xfc, + 0x93, 0x3c, 0x70, 0x31, 0xe1, 0xc8, 0xc5, 0x2a, 0xf5, 0xbf, 0xe0, 0x64, 0x66, 0x76, 0x76, 0xfb, + 0x8b, 0x62, 0x13, 0x6e, 0x3b, 0xef, 0xbd, 0xcf, 0x67, 0xde, 0x7b, 0x9f, 0x79, 0x6f, 0xc1, 0x66, + 0x6b, 0x4b, 0x20, 0xc6, 0x1d, 0x12, 0x32, 0x47, 0x48, 0x1e, 0x11, 0x8f, 0xfa, 0xcc, 0x8b, 0x88, + 0x64, 0x3c, 0x70, 0xda, 0x1b, 0xe4, 0x24, 0x6c, 0x92, 0x0d, 0xc7, 0xa3, 0x01, 0x8d, 0x88, 0xa4, + 0x0d, 0x14, 0x46, 0x5c, 0x72, 0xb8, 0x9a, 0xa0, 0x10, 0x09, 0x19, 0x1a, 0x46, 0xa1, 0x14, 0xb5, + 0xf2, 0xcc, 0x63, 0xb2, 0x19, 0xd7, 0x91, 0xcb, 0x7d, 0xc7, 0xe3, 0x1e, 0x77, 0x34, 0xb8, 0x1e, + 0x7f, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, 0x09, 0xe9, 0x4a, 0xa5, 0x2f, 0x15, 0x97, 0x47, 0xd4, 0x69, + 0x8f, 0x5c, 0xbc, 0xd2, 0x97, 0xae, 0x4f, 0xdc, 0x26, 0x0b, 0x68, 0x74, 0xea, 0x84, 0x2d, 0x4f, + 0x19, 0x84, 0xe3, 0x53, 0x49, 0xae, 0x43, 0x39, 0xe3, 0x50, 0x51, 0x1c, 0x48, 0xe6, 0xd3, 0x11, + 0xc0, 0x8b, 0x7f, 0x01, 0x84, 0xdb, 0xa4, 0x3e, 0x19, 0xc6, 0x55, 0xbe, 0x59, 0xe0, 0xee, 0xeb, + 0x88, 0xc7, 0xe1, 0x11, 0x8d, 0x04, 0xe3, 0x01, 0xa6, 0x82, 0xc7, 0x91, 0x4b, 0xe1, 0x23, 0x50, + 0xf0, 0x94, 0xbd, 0x64, 0x3d, 0xb4, 0xd6, 0xff, 0xab, 0xcd, 0x9f, 0x75, 0xca, 0xb9, 0x6e, 0xa7, + 0x5c, 0xd0, 0xc1, 0x38, 0xf1, 0xc1, 0xc7, 0x60, 0xb6, 0x9d, 0xe0, 0x4a, 0x53, 0x3a, 0x6c, 0xd1, + 0x84, 0xcd, 0xa6, 0x74, 0xa9, 0x1f, 0x3e, 0x05, 0x73, 0x91, 0xe1, 0x2e, 0x4d, 0xeb, 0xd8, 0x3b, + 0x26, 0x76, 0x2e, 0xbd, 0x13, 0x67, 0x11, 0x95, 0x9f, 0x53, 0x00, 0xee, 0xa7, 0xfa, 0xec, 0xf2, + 0xa0, 0xc1, 0xd4, 0x07, 0xdc, 0x06, 0x79, 0x79, 0x1a, 0x52, 0x93, 0xd3, 0x9a, 0x21, 0xc8, 0x1f, + 0x9e, 0x86, 0xf4, 0xaa, 0x53, 0x5e, 0x1a, 0x45, 0x28, 0x0f, 0xd6, 0x18, 0xb8, 0x07, 0x66, 0x84, + 0x24, 0x32, 0x16, 0x26, 0xd5, 0x4d, 0x83, 0x9e, 0x39, 0xd0, 0xd6, 0xab, 0x4e, 0xf9, 0x1a, 0x39, + 0x51, 0xc6, 0x94, 0x44, 0x61, 0xc3, 0x01, 0x8f, 0xc1, 0xc2, 0x09, 0x11, 0xf2, 0x7d, 0xd8, 0x20, + 0x92, 0x1e, 0x32, 0x3f, 0x29, 0xaa, 0x58, 0x7d, 0x82, 0x7a, 0x0f, 0x2d, 0x13, 0x02, 0x85, 0x2d, + 0x4f, 0x19, 0x04, 0x52, 0x7a, 0xa3, 0xf6, 0x06, 0x52, 0x88, 0xda, 0x92, 0xc9, 0x60, 0x61, 0x6f, + 0x80, 0x09, 0x0f, 0x31, 0xc3, 0x35, 0x30, 0x13, 0x51, 0x22, 0x78, 0x50, 0xca, 0xeb, 0xcc, 0x17, + 0xd2, 0xcc, 0xb1, 0xb6, 0x62, 0xe3, 0x55, 0x6a, 0xf8, 0x54, 0x08, 0xe2, 0xd1, 0x52, 0x61, 0x50, + 0x8d, 0xfd, 0xc4, 0x8c, 0x53, 0x7f, 0xe5, 0xc7, 0x14, 0x58, 0x3e, 0x48, 0xc6, 0xc0, 0x28, 0x95, + 0xf5, 0x0e, 0x7e, 0x02, 0x73, 0x2a, 0xcd, 0x06, 0x91, 0x44, 0x37, 0xba, 0x58, 0x7d, 0x3e, 0x59, + 0x51, 0xef, 0xea, 0xc7, 0xd4, 0x95, 0xfb, 0x54, 0x92, 0x1a, 0x34, 0x37, 0x83, 0x9e, 0x0d, 0x67, + 0xac, 0xd0, 0x05, 0x79, 0x11, 0x52, 0x57, 0x0b, 0x51, 0xac, 0xee, 0xa0, 0x49, 0x66, 0x13, 0x8d, + 0x49, 0xf7, 0x20, 0xa4, 0x6e, 0xed, 0xff, 0xf4, 0x25, 0xa8, 0x13, 0xd6, 0xe4, 0xb0, 
0x95, 0xe9, + 0x9d, 0x28, 0xb3, 0x7b, 0xbb, 0x6b, 0x34, 0x55, 0xaf, 0xf5, 0x83, 0xcf, 0xa1, 0xf2, 0xcb, 0x02, + 0xf7, 0xc7, 0x20, 0xf7, 0x98, 0x90, 0xf0, 0xe3, 0x48, 0x4f, 0xd1, 0x64, 0x3d, 0x55, 0x68, 0xdd, + 0xd1, 0x6c, 0x5a, 0x52, 0x4b, 0x5f, 0x3f, 0xeb, 0xa0, 0xc0, 0x24, 0xf5, 0xd5, 0xcb, 0x9e, 0x5e, + 0x2f, 0x56, 0x5f, 0xdd, 0xaa, 0xd2, 0xde, 0xa8, 0xbf, 0x51, 0x9c, 0x38, 0xa1, 0xae, 0x7c, 0x1f, + 0x5f, 0xa1, 0x6a, 0x3a, 0x6c, 0xf6, 0xcd, 0x77, 0x52, 0xe1, 0xf6, 0x64, 0x69, 0x5c, 0xb7, 0x7d, + 0x6e, 0xda, 0x0d, 0xf0, 0x25, 0x98, 0x77, 0x79, 0x20, 0x59, 0x10, 0xd3, 0x43, 0xde, 0xa2, 0xe9, + 0xea, 0xb9, 0x67, 0x20, 0xf3, 0xbb, 0xfd, 0x4e, 0x3c, 0x18, 0x5b, 0x39, 0xb7, 0xc0, 0x83, 0x1b, + 0x25, 0x86, 0x27, 0x00, 0xb8, 0xe9, 0xd0, 0x8b, 0x92, 0xa5, 0x3b, 0xba, 0x35, 0x59, 0x29, 0xa3, + 0xfb, 0xa7, 0x37, 0x08, 0x99, 0x49, 0xe0, 0x3e, 0x7e, 0xb8, 0x03, 0x16, 0xd3, 0xc2, 0x8e, 0x06, + 0x36, 0xe9, 0xb2, 0x01, 0x2e, 0xe2, 0x41, 0x37, 0x1e, 0x8e, 0xaf, 0xbd, 0x3d, 0xbb, 0xb4, 0x73, + 0xe7, 0x97, 0x76, 0xee, 0xe2, 0xd2, 0xce, 0x7d, 0xe9, 0xda, 0xd6, 0x59, 0xd7, 0xb6, 0xce, 0xbb, + 0xb6, 0x75, 0xd1, 0xb5, 0xad, 0xdf, 0x5d, 0xdb, 0xfa, 0xfa, 0xc7, 0xce, 0x7d, 0x58, 0x9d, 0xe4, + 0xb7, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0x01, 0xc1, 0xb1, 0xd8, 0x5d, 0x07, 0x00, 0x00, +} + +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MigrationCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MigrationCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MigrationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigrationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigrationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigrationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigrationSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigrationSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigrationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ContinueToken) + copy(dAtA[i:], m.ContinueToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContinueToken))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageVersionMigrationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageVersionMigrationStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageVersionMigrationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *MigrationCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionMigration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionMigrationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageVersionMigrationSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContinueToken) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StorageVersionMigrationStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GroupVersionResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupVersionResource{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `}`, + }, "") + return s +} +func (this *MigrationCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MigrationCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} 
+func (this *StorageVersionMigration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionMigration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StorageVersionMigrationSpec", "StorageVersionMigrationSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StorageVersionMigrationStatus", "StorageVersionMigrationStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionMigrationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageVersionMigration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageVersionMigration", "StorageVersionMigration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageVersionMigrationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionMigrationSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageVersionMigrationSpec{`, + `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "GroupVersionResource", "GroupVersionResource", 1), `&`, ``, 1) + `,`, + `ContinueToken:` + fmt.Sprintf("%v", this.ContinueToken) + `,`, + `}`, + }, "") + return s +} +func (this *StorageVersionMigrationStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]MigrationCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "MigrationCondition", "MigrationCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&StorageVersionMigrationStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MigrationCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MigrationCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MigrationCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MigrationConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigration: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigrationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigrationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigrationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageVersionMigration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigrationSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigrationSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigrationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContinueToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContinueToken = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageVersionMigrationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageVersionMigrationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageVersionMigrationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, MigrationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + 
if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto new file mode 100644 index 000000000..341e0bc5c --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto @@ -0,0 +1,127 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.storagemigration.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/storagemigration/v1alpha1"; + +// The names of the group, the version, and the resource. +message GroupVersionResource { + // The name of the group. + optional string group = 1; + + // The name of the version. + optional string version = 2; + + // The name of the resource. + optional string resource = 3; +} + +// Describes the state of a migration at a certain point. +message MigrationCondition { + // Type of the condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StorageVersionMigration represents a migration of stored data to the latest +// storage version. +message StorageVersionMigration { + // Standard object metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the migration. + // +optional + optional StorageVersionMigrationSpec spec = 2; + + // Status of the migration. + // +optional + optional StorageVersionMigrationStatus status = 3; +} + +// StorageVersionMigrationList is a collection of storage version migrations. +message StorageVersionMigrationList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageVersionMigration + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated StorageVersionMigration items = 2; +} + +// Spec of the storage version migration. +message StorageVersionMigrationSpec { + // The resource that is being migrated. The migrator sends requests to + // the endpoint serving the resource. + // Immutable. + optional GroupVersionResource resource = 1; + + // The token used in the list options to get the next chunk of objects + // to migrate. When the .status.conditions indicates the migration is + // "Running", users can use this token to check the progress of the + // migration. + // +optional + optional string continueToken = 2; +} + +// Status of the storage version migration. +message StorageVersionMigrationStatus { + // The latest available observations of the migration's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + repeated MigrationCondition conditions = 1; + + // ResourceVersion to compare with the GC cache for performing the migration. + // This is the current resource version of given group, version and resource when + // kube-controller-manager first observes this StorageVersionMigration resource. + optional string resourceVersion = 2; +} + diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/register.go b/vendor/k8s.io/api/storagemigration/v1alpha1/register.go new file mode 100644 index 000000000..c9706050f --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GroupName is the group name use in this package +const GroupName = "storagemigration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageVersionMigration{}, + &StorageVersionMigrationList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/types.go b/vendor/k8s.io/api/storagemigration/v1alpha1/types.go new file mode 100644 index 000000000..0f343d1e9 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/types.go @@ -0,0 +1,131 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// StorageVersionMigration represents a migration of stored data to the latest +// storage version. +type StorageVersionMigration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the migration. + // +optional + Spec StorageVersionMigrationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status of the migration. + // +optional + Status StorageVersionMigrationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// Spec of the storage version migration. +type StorageVersionMigrationSpec struct { + // The resource that is being migrated. The migrator sends requests to + // the endpoint serving the resource. + // Immutable. + Resource GroupVersionResource `json:"resource" protobuf:"bytes,1,opt,name=resource"` + // The token used in the list options to get the next chunk of objects + // to migrate. 
When the .status.conditions indicates the migration is + // "Running", users can use this token to check the progress of the + // migration. + // +optional + ContinueToken string `json:"continueToken,omitempty" protobuf:"bytes,2,opt,name=continueToken"` + // TODO: consider recording the storage version hash when the migration + // is created. It can avoid races. +} + +// The names of the group, the version, and the resource. +type GroupVersionResource struct { + // The name of the group. + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // The name of the version. + Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + // The name of the resource. + Resource string `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` +} + +type MigrationConditionType string + +const ( + // Indicates that the migration is running. + MigrationRunning MigrationConditionType = "Running" + // Indicates that the migration has completed successfully. + MigrationSucceeded MigrationConditionType = "Succeeded" + // Indicates that the migration has failed. + MigrationFailed MigrationConditionType = "Failed" +) + +// Describes the state of a migration at a certain point. +type MigrationCondition struct { + // Type of the condition. + Type MigrationConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=MigrationConditionType"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,3,opt,name=lastUpdateTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// Status of the storage version migration. +type StorageVersionMigrationStatus struct { + // The latest available observations of the migration's current state. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []MigrationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + // ResourceVersion to compare with the GC cache for performing the migration. + // This is the current resource version of given group, version and resource when + // kube-controller-manager first observes this StorageVersionMigration resource. + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 + +// StorageVersionMigrationList is a collection of storage version migrations. 
+type StorageVersionMigrationList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of StorageVersionMigration + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Items []StorageVersionMigration `json:"items" listType:"map" listMapKey:"type" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..257d72a23 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_GroupVersionResource = map[string]string{ + "": "The names of the group, the version, and the resource.", + "group": "The name of the group.", + "version": "The name of the version.", + "resource": "The name of the resource.", +} + +func (GroupVersionResource) SwaggerDoc() map[string]string { + return map_GroupVersionResource +} + +var map_MigrationCondition = map[string]string{ + "": "Describes the state of a migration at a certain point.", + "type": "Type of the condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (MigrationCondition) SwaggerDoc() map[string]string { + return map_MigrationCondition +} + +var map_StorageVersionMigration = map[string]string{ + "": "StorageVersionMigration represents a migration of stored data to the latest storage version.", + "metadata": "Standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the migration.", + "status": "Status of the migration.", +} + +func (StorageVersionMigration) SwaggerDoc() map[string]string { + return map_StorageVersionMigration +} + +var map_StorageVersionMigrationList = map[string]string{ + "": "StorageVersionMigrationList is a collection of storage version migrations.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of StorageVersionMigration", +} + +func (StorageVersionMigrationList) SwaggerDoc() map[string]string { + return map_StorageVersionMigrationList +} + +var map_StorageVersionMigrationSpec = map[string]string{ + "": "Spec of the storage version migration.", + "resource": "The resource that is being migrated. The migrator sends requests to the endpoint serving the resource. Immutable.", + "continueToken": "The token used in the list options to get the next chunk of objects to migrate. When the .status.conditions indicates the migration is \"Running\", users can use this token to check the progress of the migration.", +} + +func (StorageVersionMigrationSpec) SwaggerDoc() map[string]string { + return map_StorageVersionMigrationSpec +} + +var map_StorageVersionMigrationStatus = map[string]string{ + "": "Status of the storage version migration.", + "conditions": "The latest available observations of the migration's current state.", + "resourceVersion": "ResourceVersion to compare with the GC cache for performing the migration. This is the current resource version of given group, version and resource when kube-controller-manager first observes this StorageVersionMigration resource.", +} + +func (StorageVersionMigrationStatus) SwaggerDoc() map[string]string { + return map_StorageVersionMigrationStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..9d35011d5 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,160 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. 
+func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { + return nil + } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationCondition) DeepCopyInto(out *MigrationCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationCondition. +func (in *MigrationCondition) DeepCopy() *MigrationCondition { + if in == nil { + return nil + } + out := new(MigrationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigration) DeepCopyInto(out *StorageVersionMigration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigration. +func (in *StorageVersionMigration) DeepCopy() *StorageVersionMigration { + if in == nil { + return nil + } + out := new(StorageVersionMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionMigration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationList) DeepCopyInto(out *StorageVersionMigrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageVersionMigration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationList. +func (in *StorageVersionMigrationList) DeepCopy() *StorageVersionMigrationList { + if in == nil { + return nil + } + out := new(StorageVersionMigrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionMigrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationSpec) DeepCopyInto(out *StorageVersionMigrationSpec) { + *out = *in + out.Resource = in.Resource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationSpec. +func (in *StorageVersionMigrationSpec) DeepCopy() *StorageVersionMigrationSpec { + if in == nil { + return nil + } + out := new(StorageVersionMigrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageVersionMigrationStatus) DeepCopyInto(out *StorageVersionMigrationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]MigrationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationStatus. +func (in *StorageVersionMigrationStatus) DeepCopy() *StorageVersionMigrationStatus { + if in == nil { + return nil + } + out := new(StorageVersionMigrationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..acdb57435 --- /dev/null +++ b/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageVersionMigration) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *StorageVersionMigration) APILifecycleDeprecated() (major, minor int) { + return 1, 33 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *StorageVersionMigration) APILifecycleRemoved() (major, minor int) { + return 1, 36 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageVersionMigrationList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *StorageVersionMigrationList) APILifecycleDeprecated() (major, minor int) { + return 1, 33 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *StorageVersionMigrationList) APILifecycleRemoved() (major, minor int) { + return 1, 36 +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 000000000..f02fa8e43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,49 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. 
+ return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index d4c73022f..1a9f5e770 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -1,20 +1,16 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- saad-ali -- janetkuo -- tallclair -- dims -- hongchaodeng -- krousey -- cjcullen + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - saad-ali + - janetkuo + - tallclair + - dims + - cjcullen diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 97e17be39..57e0e71f6 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -87,21 +87,21 @@ func (e *StatusError) DebugError() (string, []interface{}) { // HasStatusCause returns true if the provided error has a details cause // with the provided type name. +// It supports wrapped errors and returns false when the error is nil. func HasStatusCause(err error, name metav1.CauseType) bool { _, ok := StatusCause(err, name) return ok } // StatusCause returns the named cause from the provided error if it exists and -// the error is of the type APIStatus. Otherwise it returns false. +// the error unwraps to the type APIStatus. Otherwise it returns false. func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) { - apierr, ok := err.(APIStatus) - if !ok || apierr == nil || apierr.Status().Details == nil { - return metav1.StatusCause{}, false - } - for _, cause := range apierr.Status().Details.Causes { - if cause.Type == name { - return cause, true + status, ok := err.(APIStatus) + if (ok || errors.As(err, &status)) && status.Status().Details != nil { + for _, cause := range status.Status().Details.Causes { + if cause.Type == name { + return cause, true + } } } return metav1.StatusCause{}, false @@ -523,7 +523,7 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr } // IsNotFound returns true if the specified error was created by NewNotFound. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsNotFound(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonNotFound { @@ -536,13 +536,13 @@ func IsNotFound(err error) bool { } // IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsAlreadyExists(err error) bool { return ReasonForError(err) == metav1.StatusReasonAlreadyExists } // IsConflict determines if the err is an error which indicates the provided update conflicts. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. 
func IsConflict(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonConflict { @@ -555,7 +555,7 @@ func IsConflict(err error) bool { } // IsInvalid determines if the err is an error which indicates the provided resource is not valid. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsInvalid(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonInvalid { @@ -568,7 +568,7 @@ func IsInvalid(err error) bool { } // IsGone is true if the error indicates the requested resource is no longer available. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsGone(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonGone { @@ -582,13 +582,13 @@ func IsGone(err error) bool { // IsResourceExpired is true if the error indicates the resource has expired and the current action is // no longer possible. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsResourceExpired(err error) bool { return ReasonForError(err) == metav1.StatusReasonExpired } // IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsNotAcceptable(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonNotAcceptable { @@ -601,7 +601,7 @@ func IsNotAcceptable(err error) bool { } // IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsUnsupportedMediaType(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonUnsupportedMediaType { @@ -615,7 +615,7 @@ func IsUnsupportedMediaType(err error) bool { // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsMethodNotSupported(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonMethodNotAllowed { @@ -628,7 +628,7 @@ func IsMethodNotSupported(err error) bool { } // IsServiceUnavailable is true if the error indicates the underlying service is no longer available. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsServiceUnavailable(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonServiceUnavailable { @@ -641,7 +641,7 @@ func IsServiceUnavailable(err error) bool { } // IsBadRequest determines if err is an error which indicates that the request is invalid. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsBadRequest(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonBadRequest { @@ -655,7 +655,7 @@ func IsBadRequest(err error) bool { // IsUnauthorized determines if err is an error which indicates that the request is unauthorized and // requires authentication by the user. -// It supports wrapped errors. 
+// It supports wrapped errors and returns false when the error is nil. func IsUnauthorized(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonUnauthorized { @@ -669,7 +669,7 @@ func IsUnauthorized(err error) bool { // IsForbidden determines if err is an error which indicates that the request is forbidden and cannot // be completed as requested. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsForbidden(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonForbidden { @@ -683,7 +683,7 @@ func IsForbidden(err error) bool { // IsTimeout determines if err is an error which indicates that request times out due to long // processing. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsTimeout(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonTimeout { @@ -697,7 +697,7 @@ func IsTimeout(err error) bool { // IsServerTimeout determines if err is an error which indicates that the request needs to be retried // by the client. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsServerTimeout(err error) bool { // do not check the status code, because no https status code exists that can // be scoped to retryable timeouts. @@ -705,7 +705,7 @@ func IsServerTimeout(err error) bool { } // IsInternalError determines if err is an error which indicates an internal server error. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsInternalError(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonInternalError { @@ -719,7 +719,7 @@ func IsInternalError(err error) bool { // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsTooManyRequests(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonTooManyRequests { @@ -737,7 +737,7 @@ func IsTooManyRequests(err error) bool { // IsRequestEntityTooLargeError determines if err is an error which indicates // the request entity is too large. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsRequestEntityTooLargeError(err error) bool { reason, code := reasonAndCodeForError(err) if reason == metav1.StatusReasonRequestEntityTooLarge { @@ -755,9 +755,10 @@ func IsRequestEntityTooLargeError(err error) bool { // IsUnexpectedServerError returns true if the server response was not in the expected API format, // and may be the result of another HTTP actor. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsUnexpectedServerError(err error) bool { - if status := APIStatus(nil); errors.As(err, &status) && status.Status().Details != nil { + status, ok := err.(APIStatus) + if (ok || errors.As(err, &status)) && status.Status().Details != nil { for _, cause := range status.Status().Details.Causes { if cause.Type == metav1.CauseTypeUnexpectedServerResponse { return true @@ -768,19 +769,20 @@ func IsUnexpectedServerError(err error) bool { } // IsUnexpectedObjectError determines if err is due to an unexpected object from the master. 
-// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func IsUnexpectedObjectError(err error) bool { - uoe := &UnexpectedObjectError{} - return err != nil && errors.As(err, &uoe) + uoe, ok := err.(*UnexpectedObjectError) + return err != nil && (ok || errors.As(err, &uoe)) } // SuggestsClientDelay returns true if this error suggests a client delay as well as the // suggested seconds to wait, or false if the error does not imply a wait. It does not // address whether the error *should* be retried, since some errors (like a 3xx) may // request delay without retry. -// It supports wrapped errors. +// It supports wrapped errors and returns false when the error is nil. func SuggestsClientDelay(err error) (int, bool) { - if t := APIStatus(nil); errors.As(err, &t) && t.Status().Details != nil { + t, ok := err.(APIStatus) + if (ok || errors.As(err, &t)) && t.Status().Details != nil { switch t.Status().Reason { // this StatusReason explicitly requests the caller to delay the action case metav1.StatusReasonServerTimeout: @@ -795,16 +797,17 @@ func SuggestsClientDelay(err error) (int, bool) { } // ReasonForError returns the HTTP status for a particular error. -// It supports wrapped errors. +// It supports wrapped errors and returns StatusReasonUnknown when +// the error is nil or doesn't have a status. func ReasonForError(err error) metav1.StatusReason { - if status := APIStatus(nil); errors.As(err, &status) { + if status, ok := err.(APIStatus); ok || errors.As(err, &status) { return status.Status().Reason } return metav1.StatusReasonUnknown } func reasonAndCodeForError(err error) (metav1.StatusReason, int32) { - if status := APIStatus(nil); errors.As(err, &status) { + if status, ok := err.(APIStatus); ok || errors.As(err, &status) { return status.Status().Reason, status.Status().Code } return metav1.StatusReasonUnknown, 0 diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS index f929e061d..1e1330fff 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -1,18 +1,14 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- janetkuo -- ncdc -- dims -- krousey -- resouer -- mfojtik + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - ncdc + - dims diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 60c8209de..cbdf2eeb8 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -22,14 +22,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true +// if the conditions are changed by this call. // conditions must be non-nil. // 1. if the condition of the specified type already exists (all fields of the existing condition are updated to // newCondition, LastTransitionTime is set to now if the new status differs from the old status) // 2. 
if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { +func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) { if conditions == nil { - return + return false } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -37,7 +38,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond newCondition.LastTransitionTime = metav1.NewTime(time.Now()) } *conditions = append(*conditions, newCondition) - return + return true } if existingCondition.Status != newCondition.Status { @@ -47,18 +48,31 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond } else { existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } + changed = true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - existingCondition.ObservedGeneration = newCondition.ObservedGeneration + if existingCondition.Reason != newCondition.Reason { + existingCondition.Reason = newCondition.Reason + changed = true + } + if existingCondition.Message != newCondition.Message { + existingCondition.Message = newCondition.Message + changed = true + } + if existingCondition.ObservedGeneration != newCondition.ObservedGeneration { + existingCondition.ObservedGeneration = newCondition.ObservedGeneration + changed = true + } + + return changed } -// RemoveStatusCondition removes the corresponding conditionType from conditions. +// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns +// true if it was present and got removed. // conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { +func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) { if conditions == nil || len(*conditions) == 0 { - return + return false } newConditions := make([]metav1.Condition, 0, len(*conditions)-1) for _, condition := range *conditions { @@ -67,7 +81,10 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) } } + removed = len(*conditions) != len(newConditions) *conditions = newConditions + + return removed } // FindStatusCondition finds the conditionType in conditions. diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go index cbf5d0263..f36aa4ec2 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -17,6 +17,7 @@ limitations under the License. 
package meta import ( + "errors" "fmt" "k8s.io/apimachinery/pkg/runtime/schema" @@ -43,6 +44,11 @@ func (e *AmbiguousResourceError) Error() string { return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) } +func (*AmbiguousResourceError) Is(target error) bool { + _, ok := target.(*AmbiguousResourceError) + return ok +} + // AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind type AmbiguousKindError struct { PartialKind schema.GroupVersionKind @@ -63,16 +69,16 @@ func (e *AmbiguousKindError) Error() string { return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) } +func (*AmbiguousKindError) Is(target error) bool { + _, ok := target.(*AmbiguousKindError) + return ok +} + func IsAmbiguousError(err error) bool { if err == nil { return false } - switch err.(type) { - case *AmbiguousResourceError, *AmbiguousKindError: - return true - default: - return false - } + return errors.Is(err, &AmbiguousResourceError{}) || errors.Is(err, &AmbiguousKindError{}) } // NoResourceMatchError is returned if the RESTMapper can't find any match for a resource @@ -84,6 +90,11 @@ func (e *NoResourceMatchError) Error() string { return fmt.Sprintf("no matches for %v", e.PartialResource) } +func (*NoResourceMatchError) Is(target error) bool { + _, ok := target.(*NoResourceMatchError) + return ok +} + // NoKindMatchError is returned if the RESTMapper can't find any match for a kind type NoKindMatchError struct { // GroupKind is the API group and kind that was searched @@ -108,14 +119,14 @@ func (e *NoKindMatchError) Error() string { } } +func (*NoKindMatchError) Is(target error) bool { + _, ok := target.(*NoKindMatchError) + return ok +} + func IsNoMatchError(err error) bool { if err == nil { return false } - switch err.(type) { - case *NoResourceMatchError, *NoKindMatchError: - return true - default: - return false - } + return errors.Is(err, &NoResourceMatchError{}) || errors.Is(err, &NoKindMatchError{}) } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index bc6ba9376..1fdd32c4b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -40,8 +40,7 @@ var ( // IsListType returns true if the provided Object has a slice called Items. // TODO: Replace the code in this check with an interface comparison by -// -// creating and enforcing that lists implement a list accessor. +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { switch t := obj.(type) { case runtime.Unstructured: @@ -98,7 +97,7 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { return nil, errExpectFieldItems } switch items.Kind() { - case reflect.Interface, reflect.Ptr: + case reflect.Interface, reflect.Pointer: target := reflect.TypeOf(items.Interface()).Elem() if target.Kind() != reflect.Slice { return nil, errExpectSliceItems @@ -113,8 +112,27 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { // EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates // the loop. +// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use EachListItemWithAlloc instead. 
func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, false) +} + +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func EachListItemWithAlloc(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func eachListItem(obj runtime.Object, fn func(runtime.Object) error, allocNew bool) error { if unstructured, ok := obj.(runtime.Unstructured); ok { + if allocNew { + return unstructured.EachListItemWithAlloc(fn) + } return unstructured.EachListItem(fn) } // TODO: Change to an interface call? @@ -131,7 +149,7 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { return nil } takeAddr := false - if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface { + if elemType := items.Type().Elem(); elemType.Kind() != reflect.Pointer && elemType.Kind() != reflect.Interface { if !items.Index(0).CanAddr() { return fmt.Errorf("unable to take address of items in %T for EachListItem", obj) } @@ -141,8 +159,19 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { for i := 0; i < len; i++ { raw := items.Index(i) if takeAddr { - raw = raw.Addr() + if allocNew { + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + // reflect.New will guarantee that itemCopy must be a pointer. + raw = itemCopy + } else { + raw = raw.Addr() + } } + // raw must be a pointer or an interface + // allocate a pointer is cheap switch item := raw.Interface().(type) { case *runtime.RawExtension: if err := fn(item.Object); err != nil { @@ -167,7 +196,23 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { // ExtractList returns obj's Items element as an array of runtime.Objects. // Returns an error if obj is not a List type (does not have an Items member). +// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use ExtractListWithAlloc instead. func ExtractList(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, false) +} + +// ExtractListWithAlloc works like ExtractList, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items in the returned list are not retained, or are retained for the same duration, use ExtractList instead for memory efficiency. 
+func ExtractListWithAlloc(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) { itemsPtr, err := GetItemsPtr(obj) if err != nil { return nil, err @@ -177,10 +222,17 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return nil, err } list := make([]runtime.Object, items.Len()) + if len(list) == 0 { + return list, nil + } + elemType := items.Type().Elem() + isRawExtension := elemType == rawExtensionObjectType + implementsObject := elemType.Implements(objectType) for i := range list { raw := items.Index(i) - switch item := raw.Interface().(type) { - case runtime.RawExtension: + switch { + case isRawExtension: + item := raw.Interface().(runtime.RawExtension) switch { case item.Object != nil: list[i] = item.Object @@ -190,8 +242,18 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { default: list[i] = nil } - case runtime.Object: - list[i] = item + case implementsObject: + list[i] = raw.Interface().(runtime.Object) + case allocNew: + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + var ok bool + // reflect.New will guarantee that itemCopy must be a pointer. + if list[i], ok = itemCopy.Interface().(runtime.Object); !ok { + return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } default: var found bool if list[i], found = raw.Addr().Interface().(runtime.Object); !found { @@ -202,8 +264,12 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return list, nil } -// objectSliceType is the type of a slice of Objects -var objectSliceType = reflect.TypeOf([]runtime.Object{}) +var ( + // objectSliceType is the type of a slice of Objects + objectSliceType = reflect.TypeOf([]runtime.Object{}) + objectType = reflect.TypeOf((*runtime.Object)(nil)).Elem() + rawExtensionObjectType = reflect.TypeOf(runtime.RawExtension{}) +) // LenList returns the length of this list or 0 if it is not a list. 
func LenList(list runtime.Object) int { @@ -238,7 +304,7 @@ func SetList(list runtime.Object, objects []runtime.Object) error { slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) for i := range objects { dest := slice.Index(i) - if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { + if dest.Type() == rawExtensionObjectType { dest = dest.FieldByName("Object") } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 6a4116a04..2551f07f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -130,7 +130,6 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { Annotations: m.GetAnnotations(), OwnerReferences: m.GetOwnerReferences(), Finalizers: m.GetFinalizers(), - ClusterName: m.GetClusterName(), ManagedFields: m.GetManagedFields(), }, } @@ -600,7 +599,7 @@ func (a genericAccessor) SetFinalizers(finalizers []string) { func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice { klog.Errorf("expect %v to be a pointer to slice", s) return ret } @@ -618,7 +617,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + if s.Kind() != reflect.Pointer || s.Elem().Kind() != reflect.Slice { klog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go index f41b9bf78..91cb98cae 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -521,10 +521,9 @@ func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string } // MaybeResetRESTMapper calls Reset() on the mapper if it is a ResettableRESTMapper. -func MaybeResetRESTMapper(mapper RESTMapper) bool { +func MaybeResetRESTMapper(mapper RESTMapper) { m, ok := mapper.(ResettableRESTMapper) if ok { m.Reset() } - return ok } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go new file mode 100644 index 000000000..72c6438cb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testrestmapper + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TestOnlyStaticRESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: +// 1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy +// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, +// all other groups alphabetical. +// +// TODO callers of this method should be updated to build their own specific restmapper based on their scheme for their tests +// TODO the things being tested are related to whether various cases are handled, not tied to the particular types being checked. +func TestOnlyStaticRESTMapper(scheme *runtime.Scheme, versionPatterns ...schema.GroupVersion) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + unionedGroups := sets.NewString() + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !unionedGroups.Has(enabledVersion.Group) { + unionedGroups.Insert(enabledVersion.Group) + unionMapper = append(unionMapper, newRESTMapper(enabledVersion.Group, scheme)) + } + } + + if len(versionPatterns) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + for _, versionPriority := range versionPatterns { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + prioritizedGroups := []string{"", "extensions", "metrics"} + resourcePriority, kindPriority := prioritiesForGroups(scheme, prioritizedGroups...) + + prioritizedGroupsSet := sets.NewString(prioritizedGroups...) + remainingGroups := sets.String{} + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !prioritizedGroupsSet.Has(enabledVersion.Group) { + remainingGroups.Insert(enabledVersion.Group) + } + } + + remainingResourcePriority, remainingKindPriority := prioritiesForGroups(scheme, remainingGroups.List()...) + resourcePriority = append(resourcePriority, remainingResourcePriority...) + kindPriority = append(kindPriority, remainingKindPriority...) + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} +} + +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, +// then any non-preferred version of the group second. 
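Aside (illustrative, not part of the vendored patch): the newly vendored testrestmapper package is test-only glue for building a RESTMapper straight from a scheme. A sketch, assuming the client-go scheme as the source of known types:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Build a mapper over everything registered in the client-go scheme.
	mapper := testrestmapper.TestOnlyStaticRESTMapper(clientgoscheme.Scheme)
	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
	fmt.Println(mapping, err)
}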
+func prioritiesForGroups(scheme *runtime.Scheme, groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, group := range groups { + availableVersions := scheme.PrioritizedVersionsForGroup(group) + if len(availableVersions) > 0 { + resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) + } + } + for _, group := range groups { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) + kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) + } + + return resourcePriority, kindPriority +} + +func newRESTMapper(group string, scheme *runtime.Scheme) meta.RESTMapper { + mapper := meta.NewDefaultRESTMapper(scheme.PrioritizedVersionsForGroup(group)) + for _, gv := range scheme.PrioritizedVersionsForGroup(group) { + for kind := range scheme.KnownTypes(gv) { + if ignoredKinds.Has(kind) { + continue + } + scope := meta.RESTScopeNamespace + if rootScopedKinds[gv.WithKind(kind).GroupKind()] { + scope = meta.RESTScopeRoot + } + mapper.Add(gv.WithKind(kind), scope) + } + } + + return mapper +} + +// hardcoded is good enough for the test we're running +var rootScopedKinds = map[schema.GroupKind]bool{ + {Group: "admission.k8s.io", Kind: "AdmissionReview"}: true, + + {Group: "admissionregistration.k8s.io", Kind: "ValidatingWebhookConfiguration"}: true, + {Group: "admissionregistration.k8s.io", Kind: "MutatingWebhookConfiguration"}: true, + + {Group: "authentication.k8s.io", Kind: "TokenReview"}: true, + + {Group: "authorization.k8s.io", Kind: "SubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectRulesReview"}: true, + + {Group: "certificates.k8s.io", Kind: "CertificateSigningRequest"}: true, + + {Group: "", Kind: "Node"}: true, + {Group: "", Kind: "Namespace"}: true, + {Group: "", Kind: "PersistentVolume"}: true, + {Group: "", Kind: "ComponentStatus"}: true, + + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: true, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRoleBinding"}: true, + + {Group: "scheduling.k8s.io", Kind: "PriorityClass"}: true, + + {Group: "storage.k8s.io", Kind: "StorageClass"}: true, + {Group: "storage.k8s.io", Kind: "VolumeAttachment"}: true, + + {Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: true, + + {Group: "apiserver.k8s.io", Kind: "AdmissionConfiguration"}: true, + + {Group: "audit.k8s.io", Kind: "Event"}: true, + {Group: "audit.k8s.io", Kind: "Policy"}: true, + + {Group: "apiregistration.k8s.io", Kind: "APIService"}: true, + + {Group: "metrics.k8s.io", Kind: "NodeMetrics"}: true, + + {Group: "wardle.example.com", Kind: "Fischer"}: true, +} + +// hardcoded is good enough for the test we're running +var ignoredKinds = sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index 15bded17a..063fd285d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ 
b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -1,12 +1,10 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- derekwaynecarr -- mikedanese -- saad-ali -- janetkuo -- xiang90 + - thockin + - smarterclayton + - wojtek-t + - derekwaynecarr + - mikedanese + - saad-ali + - janetkuo diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go index a8866a43e..2eebec667 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -203,6 +203,44 @@ func (a *int64Amount) Sub(b int64Amount) bool { return a.Add(int64Amount{value: -b.value, scale: b.scale}) } +// Mul multiplies the provided b to the current amount, or +// returns false if overflow or underflow would result. +func (a *int64Amount) Mul(b int64) bool { + switch { + case a.value == 0: + return true + case b == 0: + a.value = 0 + a.scale = 0 + return true + case a.scale == 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + a.value = c + case a.scale > 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = positiveScaleInt64(c, a.scale); !ok { + return false + } + a.value = c + default: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = negativeScaleInt64(c, -a.scale); !ok { + return false + } + a.value = c + } + return true +} + // AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision // was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 172db57fa..c3a272168 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// source: k8s.io/apimachinery/pkg/api/resource/generated.proto package resource @@ -41,7 +41,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Quantity) Reset() { *m = Quantity{} } func (*Quantity) ProtoMessage() {} func (*Quantity) Descriptor() ([]byte, []int) { - return fileDescriptor_612bba87bd70906c, []int{0} + return fileDescriptor_7288c78ff45111e9, []int{0} } func (m *Quantity) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Quantity.Unmarshal(m, b) @@ -64,7 +64,7 @@ var xxx_messageInfo_Quantity proto.InternalMessageInfo func (m *QuantityValue) Reset() { *m = QuantityValue{} } func (*QuantityValue) ProtoMessage() {} func (*QuantityValue) Descriptor() ([]byte, []int) { - return fileDescriptor_612bba87bd70906c, []int{1} + return fileDescriptor_7288c78ff45111e9, []int{1} } func (m *QuantityValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_QuantityValue.Unmarshal(m, b) @@ -90,25 +90,24 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) + proto.RegisterFile("k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_7288c78ff45111e9) } -var fileDescriptor_612bba87bd70906c = []byte{ - // 254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0xcd, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0x4b, - 0xcd, 0x4b, 0xc9, 0x2f, 0xd2, 0x87, 0x4a, 0x24, 0x16, 0x64, 0xe6, 0x26, 0x26, 0x67, 0x64, 0xe6, - 0xa5, 0x16, 0x55, 0xea, 0x17, 0x64, 0xa7, 0x83, 0x04, 0xf4, 0x8b, 0x52, 0x8b, 0xf3, 0x4b, 0x8b, - 0x92, 0x53, 0xf5, 0xd3, 0x53, 0xf3, 0x52, 0x8b, 0x12, 0x4b, 0x52, 0x53, 0xf4, 0x0a, 0x8a, 0xf2, - 0x4b, 0xf2, 0x85, 0x54, 0x20, 0xba, 0xf4, 0x90, 0x75, 0xe9, 0x15, 0x64, 0xa7, 0x83, 0x04, 0xf4, - 0x60, 0xba, 0xa4, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, - 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x18, - 0xaa, 0x64, 0xc1, 0xc5, 0x11, 0x58, 0x9a, 0x98, 0x57, 0x92, 0x59, 0x52, 0x29, 0x24, 0xc6, 0xc5, - 0x56, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe5, 0x59, - 0x89, 0xcc, 0x58, 0x20, 0xcf, 0xd0, 0xb1, 0x50, 0x9e, 0x61, 0xc2, 0x42, 0x79, 0x86, 0x05, 0x0b, - 0xe5, 0x19, 0x1a, 0xee, 0x28, 0x30, 0x28, 0xd9, 0x72, 0xf1, 0xc2, 0x74, 0x86, 0x25, 0xe6, 0x94, - 0xa6, 0x92, 0xa6, 0xdd, 0x49, 0xef, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, - 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x37, - 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x14, 0x07, 0xcc, 0x5f, - 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0x76, 0x9f, 0x66, 0x4d, 0x01, 0x00, 0x00, +var fileDescriptor_7288c78ff45111e9 = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xc9, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0x17, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0xea, 0xa7, + 0xa7, 0xe6, 0xa5, 0x16, 0x25, 0x96, 0xa4, 0xa6, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xa9, + 0x40, 0x74, 0xe9, 0x21, 0xeb, 0xd2, 0x2b, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0xc1, 
0x74, 0x49, 0xe9, + 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, + 0x83, 0x35, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x54, 0xc9, 0x82, 0x8b, + 0x23, 0xb0, 0x34, 0x31, 0xaf, 0x24, 0xb3, 0xa4, 0x52, 0x48, 0x8c, 0x8b, 0xad, 0xb8, 0xa4, 0x28, + 0x33, 0x2f, 0x5d, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xca, 0xb3, 0x12, 0x99, 0xb1, 0x40, + 0x9e, 0xa1, 0x63, 0xa1, 0x3c, 0xc3, 0x84, 0x85, 0xf2, 0x0c, 0x0b, 0x16, 0xca, 0x33, 0x34, 0xdc, + 0x51, 0x60, 0x50, 0xb2, 0xe5, 0xe2, 0x85, 0xe9, 0x0c, 0x4b, 0xcc, 0x29, 0x4d, 0x25, 0x4d, 0xbb, + 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, + 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x6f, 0x3c, 0x92, 0x63, 0x7c, + 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x28, 0x15, 0x62, 0x42, 0x0a, 0x10, 0x00, 0x00, + 0xff, 0xff, 0x50, 0x91, 0xd0, 0x9c, 0x50, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index f8b3f80c0..ddd0db8fb 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -22,7 +22,7 @@ syntax = "proto2"; package k8s.io.apimachinery.pkg.api.resource; // Package-wide variables from generator "generated". -option go_package = "resource"; +option go_package = "k8s.io/apimachinery/pkg/api/resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, @@ -30,6 +30,7 @@ option go_package = "resource"; // // The serialization format is: // +// ``` // ::= // // (Note that may be empty, from the "" case in .) @@ -49,6 +50,7 @@ option go_package = "resource"; // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) // // ::= "e" | "E" +// ``` // // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal @@ -63,16 +65,16 @@ option go_package = "resource"; // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: // -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. +// - No precision is lost +// - No fractional digits will be emitted +// - The exponent (or suffix) is as large as possible. // // The sign will be omitted unless the number is negative. // // Examples: // -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" +// - 1.5 will be serialized as "1500m" +// - 1.5Gi will be serialized as "1536Mi" // // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 323537c5b..50af8334f 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + inf "gopkg.in/inf.v0" ) @@ -34,6 +36,7 @@ import ( // // The serialization format is: // +// ``` // ::= // // (Note that may be empty, from the "" case in .) 
@@ -53,6 +56,7 @@ import ( // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) // // ::= "e" | "E" +// ``` // // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal @@ -67,16 +71,16 @@ import ( // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: // -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. +// - No precision is lost +// - No fractional digits will be emitted +// - The exponent (or suffix) is as large as possible. // // The sign will be omitted unless the number is negative. // // Examples: // -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" +// - 1.5 will be serialized as "1500m" +// - 1.5Gi will be serialized as "1536Mi" // // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. @@ -406,6 +410,10 @@ func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} } // the OpenAPI spec of this type. func (_ Quantity) OpenAPISchemaFormat() string { return "" } +// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing +// the OpenAPI v3 spec of this type. +func (Quantity) OpenAPIV3OneOfTypes() []string { return []string{"string", "number"} } + // CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). // // Note about BinarySI: @@ -586,6 +594,16 @@ func (q *Quantity) Sub(y Quantity) { q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) } +// Mul multiplies the provided y to the current value. +// It will return false if the result is inexact. Otherwise, it will return true. +func (q *Quantity) Mul(y int64) bool { + q.s = "" + if q.d.Dec == nil && q.i.Mul(y) { + return true + } + return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64() +} + // Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the // quantity is greater than y. func (q *Quantity) Cmp(y Quantity) int { @@ -648,7 +666,7 @@ func (q Quantity) MarshalJSON() ([]byte, error) { copy(out[1:], q.s) return out, nil } - result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes) + result := make([]byte, int64QuantityExpectedBytes) result[0] = '"' number, suffix := q.CanonicalizeBytes(result[1:1]) // if the same slice was returned to us that we passed in, avoid another allocation by copying number into @@ -667,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +func (q Quantity) MarshalCBOR() ([]byte, error) { + // The call to String() should never return the string "" because the receiver's + // address will never be nil. + return cbor.Marshal(q.String()) +} + // ToUnstructured implements the value.UnstructuredConverter interface. 
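Aside (illustrative, not part of the vendored patch): the new Quantity.Mul mutates the receiver and reports whether the product stayed exact, and the MarshalCBOR/UnmarshalCBOR methods added alongside it round-trip the same canonical string form used for JSON. A sketch of the multiply-in-place behaviour:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	perReplica := resource.MustParse("512Mi")

	// Scale the per-replica request up to three replicas; Mul returns true
	// because 512Mi * 3 is representable exactly.
	exact := perReplica.Mul(3)

	fmt.Println(perReplica.String(), exact) // 1536Mi true
}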
func (q Quantity) ToUnstructured() interface{} { return q.String() @@ -695,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { return nil } +func (q *Quantity) UnmarshalCBOR(value []byte) error { + var s *string + if err := cbor.Unmarshal(value, &s); err != nil { + return err + } + + if s == nil { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + + parsed, err := ParseQuantity(strings.TrimSpace(*s)) + if err != nil { + return err + } + + *q = parsed + return nil +} + // NewDecimalQuantity returns a new Quantity representing the given // value in the given format. func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go index 5ed7abe66..6ec527f9c 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go @@ -165,7 +165,7 @@ func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s if exponent == 0 { return nil, true } - result := make([]byte, 8, 8) + result := make([]byte, 8) result[0] = 'e' number := strconv.AppendInt(result[1:1], int64(exponent), 10) if &result[1] == &number[0] { diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS new file mode 100644 index 000000000..402373247 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go new file mode 100644 index 000000000..9f20152e4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation contains generic api type validation functions. +package validation // import "k8s.io/apimachinery/pkg/api/validation" diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go new file mode 100644 index 000000000..e0b5b1490 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go @@ -0,0 +1,88 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// IsNegativeErrorMsg is a error message for value must be greater than or equal to 0. +const IsNegativeErrorMsg string = `must be greater than or equal to 0` + +// ValidateNameFunc validates that the provided name is valid for a given resource type. +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc func(name string, prefix bool) []string + +// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. +func NameIsDNSSubdomain(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Subdomain(name) +} + +// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. +func NameIsDNSLabel(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1123Label(name) +} + +// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label. +func NameIsDNS1035Label(name string, prefix bool) []string { + if prefix { + name = maskTrailingDash(name) + } + return validation.IsDNS1035Label(name) +} + +// ValidateNamespaceName can be used to check whether the given namespace name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNamespaceName = NameIsDNSLabel + +// ValidateServiceAccountName can be used to check whether the given service account name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceAccountName = NameIsDNSSubdomain + +// maskTrailingDash replaces the final character of a string with a subdomain safe +// value if it is a dash and if the length of this string is greater than 1. Note that +// this is used when a value could be appended to the string, see ValidateNameFunc +// for more info. +func maskTrailingDash(name string) string { + if len(name) > 1 && strings.HasSuffix(name, "-") { + return name[:len(name)-2] + "a" + } + return name +} + +// ValidateNonnegativeField validates that given value is not negative. +func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg)) + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go new file mode 100644 index 000000000..593d7ba8c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -0,0 +1,265 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// FieldImmutableErrorMsg is a error message for field is immutable. +const FieldImmutableErrorMsg string = `field is immutable` + +const TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB + +// BannedOwners is a black list of object that are not allowed to be owners. +var BannedOwners = map[schema.GroupVersionKind]struct{}{ + {Group: "", Version: "v1", Kind: "Event"}: {}, +} + +// ValidateAnnotations validates that a set of annotations are correctly defined. +func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k := range annotations { + // The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking. + for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { + allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) + } + } + if err := ValidateAnnotationsSize(annotations); err != nil { + allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB)) + } + return allErrs +} + +func ValidateAnnotationsSize(annotations map[string]string) error { + var totalSize int64 + for k, v := range annotations { + totalSize += (int64)(len(k)) + (int64)(len(v)) + } + if totalSize > (int64)(TotalAnnotationSizeLimitB) { + return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, TotalAnnotationSizeLimitB) + } + return nil +} + +func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) + // gvk.Group is empty for the legacy group. + if len(gvk.Version) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) + } + if len(gvk.Kind) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) + } + if len(ownerReference.Name) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) + } + if len(ownerReference.UID) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) + } + if _, ok := BannedOwners[gvk]; ok { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) + } + return allErrs +} + +// ValidateOwnerReferences validates that a set of owner references are correctly defined. 
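Aside (illustrative, not part of the vendored patch): the newly vendored generic validation helpers return a field.ErrorList rather than failing hard. A sketch using ValidateAnnotations; the annotation keys are examples only:

package main

import (
	"fmt"

	apivalidation "k8s.io/apimachinery/pkg/api/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	annotations := map[string]string{
		"openpolicyagent.org/policy-status": "ok", // valid qualified name
		"bad key with spaces":               "x",  // rejected by IsQualifiedName
	}
	errs := apivalidation.ValidateAnnotations(annotations, field.NewPath("metadata", "annotations"))
	for _, e := range errs {
		fmt.Println(e) // one Invalid error for the malformed key
	}
}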
+func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + firstControllerName := "" + for _, ref := range ownerReferences { + allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) + if ref.Controller != nil && *ref.Controller { + curControllerName := ref.Kind + "/" + ref.Name + if firstControllerName != "" { + allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences, + fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", firstControllerName, curControllerName))) + } else { + firstControllerName = curControllerName + } + } + } + return allErrs +} + +// ValidateFinalizerName validates finalizer names. +func ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(stringValue) { + allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) + } + + return allErrs +} + +// ValidateNoNewFinalizers validates the new finalizers has no new finalizers compare to old finalizers. +func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...)) + if len(extra) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List()))) + } + return allErrs +} + +// ValidateImmutableField validates the new value and the old value are deeply equal. +func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !apiequality.Semantic.DeepEqual(oldVal, newVal) { + allErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg)) + } + return allErrs +} + +// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + metadata, err := meta.Accessor(objMeta) + if err != nil { + var allErrs field.ErrorList + allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath) +} + +// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if len(meta.GetGenerateName()) != 0 { + for _, msg := range nameFn(meta.GetGenerateName(), true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GetGenerateName(), msg)) + } + } + // If the generated name validates, but the calculated value does not, it's a problem with generation, and we + // report it here. This may confuse users, but indicates a programming bug and still must be validated. 
+ // If there are multiple fields out of which one is required then add an or as a separator + if len(meta.GetName()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) + } else { + for _, msg := range nameFn(meta.GetName(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg)) + } + } + if requiresNamespace { + if len(meta.GetNamespace()) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) + } else { + for _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.GetNamespace(), msg)) + } + } + } else { + if len(meta.GetNamespace()) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type")) + } + } + + allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...) + allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(meta.GetManagedFields(), fldPath.Child("managedFields"))...) + return allErrs +} + +// ValidateFinalizers tests if the finalizers name are valid, and if there are conflicting finalizers. +func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + hasFinalizerOrphanDependents := false + hasFinalizerDeleteDependents := false + for _, finalizer := range finalizers { + allErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...) + if finalizer == metav1.FinalizerOrphanDependents { + hasFinalizerOrphanDependents = true + } + if finalizer == metav1.FinalizerDeleteDependents { + hasFinalizerDeleteDependents = true + } + } + if hasFinalizerDeleteDependents && hasFinalizerOrphanDependents { + allErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf("finalizer %s and %s cannot be both set", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents))) + } + return allErrs +} + +// ValidateObjectMetaUpdate validates an object's metadata when updated. +func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { + newMetadata, err := meta.Accessor(newMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error())) + return allErrs + } + oldMetadata, err := meta.Accessor(oldMeta) + if err != nil { + allErrs := field.ErrorList{} + allErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error())) + return allErrs + } + return ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath) +} + +// ValidateObjectMetaAccessorUpdate validates an object's metadata when updated. +func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // Finalizers cannot be added if the object is already being deleted. 
+ if oldMeta.GetDeletionTimestamp() != nil { + allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child("finalizers"))...) + } + + // Reject updates that don't specify a resource version + if len(newMeta.GetResourceVersion()) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.GetResourceVersion(), "must be specified for an update")) + } + + // Generation shouldn't be decremented + if newMeta.GetGeneration() < oldMeta.GetGeneration() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented")) + } + + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child("creationTimestamp"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionTimestamp(), oldMeta.GetDeletionTimestamp(), fldPath.Child("deletionTimestamp"))...) + allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionGracePeriodSeconds(), oldMeta.GetDeletionGracePeriodSeconds(), fldPath.Child("deletionGracePeriodSeconds"))...) + + allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + allErrs = append(allErrs, v1validation.ValidateManagedFields(newMeta.GetManagedFields(), fldPath.Child("managedFields"))...) + + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go new file mode 100644 index 000000000..29c6a48b6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// SetListOptionsDefaults sets defaults on the provided ListOptions if applicable. +// +// TODO(#115478): once the watch-list fg is always on we register this function in the scheme (via AddTypeDefaultingFunc). +// TODO(#115478): when the function is registered in the scheme remove all callers of this method. 
+func SetListOptionsDefaults(obj *ListOptions, isWatchListFeatureEnabled bool) { + if !isWatchListFeatureEnabled { + return + } + if obj.SendInitialEvents != nil || len(obj.ResourceVersionMatch) != 0 { + return + } + legacy := obj.ResourceVersion == "" || obj.ResourceVersion == "0" + if obj.Watch && legacy { + turnOnInitialEvents := true + obj.SendInitialEvents = &turnOnInitialEvents + obj.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index a49b5f2be..00d2b8c68 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -66,6 +66,31 @@ type ListOptions struct { // it does not recognize and will return a 410 error if the token can no longer be used because // it has expired. Continue string + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + SendInitialEvents *bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go new file mode 100644 index 000000000..2734a8f3b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go @@ -0,0 +1,76 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
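Aside (illustrative, not part of the vendored patch): on the client side the same contract applies to metav1.ListOptions — sendInitialEvents is only valid on a watch together with resourceVersionMatch=NotOlderThan. A sketch of options for a watch-list style request (field values are examples only):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// watchListOptions asks the server to replay the current state as synthetic
// events followed by a bookmark, then stream changes as usual.
func watchListOptions() metav1.ListOptions {
	return metav1.ListOptions{
		Watch:                true,
		AllowWatchBookmarks:  true,
		SendInitialEvents:    ptr.To(true),
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		// ResourceVersion left empty: "consistent read", the bookmark is sent
		// once the state is synced to at least the moment the request started.
	}
}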
+*/ + +package validation + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateListOptions returns all validation errors found while validating the ListOptions. +func ValidateListOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + if options.Watch { + return validateWatchOptions(options, isWatchListFeatureEnabled) + } + allErrs := field.ErrorList{} + if match := options.ResourceVersionMatch; len(match) > 0 { + if len(options.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided")) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + if match != metav1.ResourceVersionMatchExact && match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchExact), string(metav1.ResourceVersionMatchNotOlderThan), ""})) + } + if match == metav1.ResourceVersionMatchExact && options.ResourceVersion == "0" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\"")) + } + } + if options.SendInitialEvents != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for list")) + } + return allErrs +} + +func validateWatchOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + allErrs := field.ErrorList{} + match := options.ResourceVersionMatch + if options.SendInitialEvents != nil { + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), fmt.Sprintf("sendInitialEvents requires setting resourceVersionMatch to %s", metav1.ResourceVersionMatchNotOlderThan))) + } + if !isWatchListFeatureEnabled { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for watch unless the WatchList feature gate is enabled")) + } + } + if len(match) > 0 { + if options.SendInitialEvents == nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch unless sendInitialEvents is provided")) + } + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchNotOlderThan)})) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 6d212b846..a6552c276 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -115,6 +115,7 @@ func 
autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } @@ -137,6 +138,7 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 6e1eac5c7..af66a2ac4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -75,6 +75,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 5731b9ee2..e7e5c152d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -1,23 +1,16 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- caesarxuchao -- liggitt -- davidopp -- sttts -- quinton-hoole -- luxas -- janetkuo -- justinsb -- ncdc -- soltysh -- dims -- hongchaodeng -- krousey -- therc -- kevin-wangzefeng + - thockin + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - sttts + - luxas + - janetkuo + - justinsb + - ncdc + - soltysh + - dims diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 15b45ffa8..5005beb12 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -18,6 +18,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" ) // IsControlledBy checks if the object has a controllerRef set to the given owner @@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference { return nil } cp := *ref + cp.Controller = ptr.To(*ref.Controller) + if ref.BlockOwnerDeletion != nil { + cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion) + } return &cp } -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller func GetControllerOfNoCopy(controllee Object) *OwnerReference { refs := controllee.GetOwnerReferences() for i := range refs { @@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference { // NewControllerRef creates an OwnerReference pointing to the given owner. 
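Aside (illustrative, not part of the vendored patch): GetControllerOf now also copies the Controller and BlockOwnerDeletion pointers, so callers can mutate the returned reference without touching the owner reference stored on the object. A sketch (object names are examples only):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	owner := &metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Name: "rs-1", UID: "1234"}}

	pod := &metav1.PartialObjectMetadata{}
	pod.OwnerReferences = []metav1.OwnerReference{
		*metav1.NewControllerRef(owner, appsv1.SchemeGroupVersion.WithKind("ReplicaSet")),
	}

	ref := metav1.GetControllerOf(pod)
	*ref.Controller = false // mutates the copy only

	fmt.Println(*pod.OwnerReferences[0].Controller) // still true
}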
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { - blockOwnerDeletion := true - isController := true return &OwnerReference{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: &blockOwnerDeletion, - Controller: &isController, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 9e7924c12..229ea2c2c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// source: k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto package v1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *APIGroup) Reset() { *m = APIGroup{} } func (*APIGroup) ProtoMessage() {} func (*APIGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{0} + return fileDescriptor_a8431b6e0aeeb761, []int{0} } func (m *APIGroup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_APIGroup proto.InternalMessageInfo func (m *APIGroupList) Reset() { *m = APIGroupList{} } func (*APIGroupList) ProtoMessage() {} func (*APIGroupList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{1} + return fileDescriptor_a8431b6e0aeeb761, []int{1} } func (m *APIGroupList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_APIGroupList proto.InternalMessageInfo func (m *APIResource) Reset() { *m = APIResource{} } func (*APIResource) ProtoMessage() {} func (*APIResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{2} + return fileDescriptor_a8431b6e0aeeb761, []int{2} } func (m *APIResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_APIResource proto.InternalMessageInfo func (m *APIResourceList) Reset() { *m = APIResourceList{} } func (*APIResourceList) ProtoMessage() {} func (*APIResourceList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{3} + return fileDescriptor_a8431b6e0aeeb761, []int{3} } func (m *APIResourceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_APIResourceList proto.InternalMessageInfo func (m *APIVersions) Reset() { *m = APIVersions{} } func (*APIVersions) ProtoMessage() {} func (*APIVersions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{4} + return fileDescriptor_a8431b6e0aeeb761, []int{4} } func (m *APIVersions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_APIVersions proto.InternalMessageInfo func (m *ApplyOptions) Reset() { *m = ApplyOptions{} } func (*ApplyOptions) ProtoMessage() {} func (*ApplyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{5} + return fileDescriptor_a8431b6e0aeeb761, []int{5} } func (m *ApplyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var xxx_messageInfo_ApplyOptions proto.InternalMessageInfo func (m *Condition) Reset() { *m = Condition{} } func (*Condition) 
ProtoMessage() {} func (*Condition) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{6} + return fileDescriptor_a8431b6e0aeeb761, []int{6} } func (m *Condition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_Condition proto.InternalMessageInfo func (m *CreateOptions) Reset() { *m = CreateOptions{} } func (*CreateOptions) ProtoMessage() {} func (*CreateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{7} + return fileDescriptor_a8431b6e0aeeb761, []int{7} } func (m *CreateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_CreateOptions proto.InternalMessageInfo func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func (*DeleteOptions) ProtoMessage() {} func (*DeleteOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{8} + return fileDescriptor_a8431b6e0aeeb761, []int{8} } func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo func (m *Duration) Reset() { *m = Duration{} } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{9} + return fileDescriptor_a8431b6e0aeeb761, []int{9} } func (m *Duration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() { var xxx_messageInfo_Duration proto.InternalMessageInfo +func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} } +func (*FieldSelectorRequirement) ProtoMessage() {} +func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_a8431b6e0aeeb761, []int{10} +} +func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorRequirement.Merge(m, src) +} +func (m *FieldSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo + func (m *FieldsV1) Reset() { *m = FieldsV1{} } func (*FieldsV1) ProtoMessage() {} func (*FieldsV1) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{10} + return fileDescriptor_a8431b6e0aeeb761, []int{11} } func (m *FieldsV1) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} func (*GetOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{11} + return fileDescriptor_a8431b6e0aeeb761, []int{12} } func (m *GetOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} func (*GroupKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{12} + return fileDescriptor_a8431b6e0aeeb761, []int{13} } 
func (m *GroupKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} func (*GroupResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{13} + return fileDescriptor_a8431b6e0aeeb761, []int{14} } func (m *GroupResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} func (*GroupVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{14} + return fileDescriptor_a8431b6e0aeeb761, []int{15} } func (m *GroupVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{15} + return fileDescriptor_a8431b6e0aeeb761, []int{16} } func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} func (*GroupVersionKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{16} + return fileDescriptor_a8431b6e0aeeb761, []int{17} } func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} func (*GroupVersionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{17} + return fileDescriptor_a8431b6e0aeeb761, []int{18} } func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} func (*LabelSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{18} + return fileDescriptor_a8431b6e0aeeb761, []int{19} } func (m *LabelSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{19} + return fileDescriptor_a8431b6e0aeeb761, []int{20} } func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{20} + return fileDescriptor_a8431b6e0aeeb761, []int{21} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var 
xxx_messageInfo_List proto.InternalMessageInfo func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} func (*ListMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{21} + return fileDescriptor_a8431b6e0aeeb761, []int{22} } func (m *ListMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} func (*ListOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{22} + return fileDescriptor_a8431b6e0aeeb761, []int{23} } func (m *ListOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } func (*ManagedFieldsEntry) ProtoMessage() {} func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{23} + return fileDescriptor_a8431b6e0aeeb761, []int{24} } func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} func (*MicroTime) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{24} + return fileDescriptor_a8431b6e0aeeb761, []int{25} } func (m *MicroTime) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MicroTime.Unmarshal(m, b) @@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{25} + return fileDescriptor_a8431b6e0aeeb761, []int{26} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{26} + return fileDescriptor_a8431b6e0aeeb761, []int{27} } func (m *OwnerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } func (*PartialObjectMetadata) ProtoMessage() {} func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{27} + return fileDescriptor_a8431b6e0aeeb761, []int{28} } func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{28} + return fileDescriptor_a8431b6e0aeeb761, []int{29} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} func (*Patch) 
Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{29} + return fileDescriptor_a8431b6e0aeeb761, []int{30} } func (m *Patch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo func (m *PatchOptions) Reset() { *m = PatchOptions{} } func (*PatchOptions) ProtoMessage() {} func (*PatchOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{30} + return fileDescriptor_a8431b6e0aeeb761, []int{31} } func (m *PatchOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{31} + return fileDescriptor_a8431b6e0aeeb761, []int{32} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} func (*RootPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{32} + return fileDescriptor_a8431b6e0aeeb761, []int{33} } func (m *RootPaths) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{33} + return fileDescriptor_a8431b6e0aeeb761, []int{34} } func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{34} + return fileDescriptor_a8431b6e0aeeb761, []int{35} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} func (*StatusCause) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{35} + return fileDescriptor_a8431b6e0aeeb761, []int{36} } func (m *StatusCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} func (*StatusDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{36} + return fileDescriptor_a8431b6e0aeeb761, []int{37} } func (m *StatusDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo func (m *TableOptions) Reset() { *m = TableOptions{} } func (*TableOptions) ProtoMessage() {} func (*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{37} + return fileDescriptor_a8431b6e0aeeb761, []int{38} } func (m *TableOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1111,7 +1139,7 @@ var 
xxx_messageInfo_TableOptions proto.InternalMessageInfo func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{38} + return fileDescriptor_a8431b6e0aeeb761, []int{39} } func (m *Time) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Time.Unmarshal(m, b) @@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{39} + return fileDescriptor_a8431b6e0aeeb761, []int{40} } func (m *Timestamp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{40} + return fileDescriptor_a8431b6e0aeeb761, []int{41} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} func (*UpdateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{41} + return fileDescriptor_a8431b6e0aeeb761, []int{42} } func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} func (*Verbs) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{42} + return fileDescriptor_a8431b6e0aeeb761, []int{43} } func (m *Verbs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} func (*WatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{43} + return fileDescriptor_a8431b6e0aeeb761, []int{44} } func (m *WatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1282,6 +1310,7 @@ func init() { proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement") proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") @@ -1322,190 +1351,191 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) -} - -var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2859 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0xcb, 0x6f, 0x24, 0x47, - 0xf9, 0xee, 0x19, 0x8f, 0x3d, 0xf3, 0x8d, 0xc7, 0x8f, 0x5a, 0xef, 0xef, 0x37, 0x6b, 0x84, 0xc7, - 0xe9, 0xa0, 0x68, 0x03, 0xc9, 0x38, 0x5e, 
0x42, 0xb4, 0xd9, 0x90, 0x80, 0xc7, 0xb3, 0xde, 0x98, - 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x40, 0x88, 0x50, 0xda, 0xdd, 0xe5, 0x71, 0xe3, 0x9e, 0xee, 0x49, - 0x55, 0x8f, 0x37, 0x03, 0x07, 0x72, 0x00, 0x01, 0x12, 0x8a, 0xc2, 0x8d, 0x13, 0x4a, 0x04, 0x7f, - 0x00, 0xe2, 0x02, 0x7f, 0x00, 0x12, 0x39, 0x06, 0x71, 0x89, 0x04, 0x1a, 0x25, 0xe6, 0xc0, 0x11, - 0x71, 0xf5, 0x05, 0x54, 0x8f, 0xee, 0xae, 0x9e, 0xc7, 0xba, 0x27, 0xbb, 0x44, 0xdc, 0xa6, 0xbf, - 0x77, 0x55, 0x7d, 0xf5, 0xbd, 0x6a, 0x60, 0xf7, 0xe4, 0x3a, 0xab, 0xbb, 0xc1, 0xfa, 0x49, 0xf7, - 0x90, 0x50, 0x9f, 0x84, 0x84, 0xad, 0x9f, 0x12, 0xdf, 0x09, 0xe8, 0xba, 0x42, 0x58, 0x1d, 0xb7, - 0x6d, 0xd9, 0xc7, 0xae, 0x4f, 0x68, 0x6f, 0xbd, 0x73, 0xd2, 0xe2, 0x00, 0xb6, 0xde, 0x26, 0xa1, - 0xb5, 0x7e, 0xba, 0xb1, 0xde, 0x22, 0x3e, 0xa1, 0x56, 0x48, 0x9c, 0x7a, 0x87, 0x06, 0x61, 0x80, - 0xbe, 0x20, 0xb9, 0xea, 0x3a, 0x57, 0xbd, 0x73, 0xd2, 0xe2, 0x00, 0x56, 0xe7, 0x5c, 0xf5, 0xd3, - 0x8d, 0x95, 0xa7, 0x5b, 0x6e, 0x78, 0xdc, 0x3d, 0xac, 0xdb, 0x41, 0x7b, 0xbd, 0x15, 0xb4, 0x82, - 0x75, 0xc1, 0x7c, 0xd8, 0x3d, 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0xc6, 0x9a, 0x42, - 0xbb, 0x7e, 0xe8, 0xb6, 0xc9, 0xa0, 0x15, 0x2b, 0xcf, 0x5d, 0xc4, 0xc0, 0xec, 0x63, 0xd2, 0xb6, - 0x06, 0xf9, 0xcc, 0x3f, 0xe5, 0xa1, 0xb8, 0xb9, 0xbf, 0x73, 0x8b, 0x06, 0xdd, 0x0e, 0x5a, 0x83, - 0x69, 0xdf, 0x6a, 0x93, 0xaa, 0xb1, 0x66, 0x5c, 0x2d, 0x35, 0xe6, 0x3e, 0xe8, 0xd7, 0xa6, 0xce, - 0xfa, 0xb5, 0xe9, 0x57, 0xad, 0x36, 0xc1, 0x02, 0x83, 0x3c, 0x28, 0x9e, 0x12, 0xca, 0xdc, 0xc0, - 0x67, 0xd5, 0xdc, 0x5a, 0xfe, 0x6a, 0xf9, 0xda, 0x4b, 0xf5, 0x2c, 0xeb, 0xaf, 0x0b, 0x05, 0xf7, - 0x24, 0xeb, 0x76, 0x40, 0x9b, 0x2e, 0xb3, 0x83, 0x53, 0x42, 0x7b, 0x8d, 0x45, 0xa5, 0xa5, 0xa8, - 0x90, 0x0c, 0xc7, 0x1a, 0xd0, 0x8f, 0x0c, 0x58, 0xec, 0x50, 0x72, 0x44, 0x28, 0x25, 0x8e, 0xc2, - 0x57, 0xf3, 0x6b, 0xc6, 0x23, 0x50, 0x5b, 0x55, 0x6a, 0x17, 0xf7, 0x07, 0xe4, 0xe3, 0x21, 0x8d, - 0xe8, 0xd7, 0x06, 0xac, 0x30, 0x42, 0x4f, 0x09, 0xdd, 0x74, 0x1c, 0x4a, 0x18, 0x6b, 0xf4, 0xb6, - 0x3c, 0x97, 0xf8, 0xe1, 0xd6, 0x4e, 0x13, 0xb3, 0xea, 0xb4, 0xd8, 0x87, 0xaf, 0x65, 0x33, 0xe8, - 0x60, 0x9c, 0x9c, 0x86, 0xa9, 0x2c, 0x5a, 0x19, 0x4b, 0xc2, 0xf0, 0x03, 0xcc, 0x30, 0x8f, 0x60, - 0x2e, 0x3a, 0xc8, 0xdb, 0x2e, 0x0b, 0xd1, 0x3d, 0x98, 0x69, 0xf1, 0x0f, 0x56, 0x35, 0x84, 0x81, - 0xf5, 0x6c, 0x06, 0x46, 0x32, 0x1a, 0xf3, 0xca, 0x9e, 0x19, 0xf1, 0xc9, 0xb0, 0x92, 0x66, 0xfe, - 0x6c, 0x1a, 0xca, 0x9b, 0xfb, 0x3b, 0x98, 0xb0, 0xa0, 0x4b, 0x6d, 0x92, 0xc1, 0x69, 0xae, 0xc3, - 0x1c, 0x73, 0xfd, 0x56, 0xd7, 0xb3, 0x28, 0x87, 0x56, 0x67, 0x04, 0xe5, 0xb2, 0xa2, 0x9c, 0x3b, - 0xd0, 0x70, 0x38, 0x45, 0x89, 0xae, 0x01, 0x70, 0x09, 0xac, 0x63, 0xd9, 0xc4, 0xa9, 0xe6, 0xd6, - 0x8c, 0xab, 0xc5, 0x06, 0x52, 0x7c, 0xf0, 0x6a, 0x8c, 0xc1, 0x1a, 0x15, 0x7a, 0x1c, 0x0a, 0xc2, - 0xd2, 0x6a, 0x51, 0xa8, 0xa9, 0x28, 0xf2, 0x82, 0x58, 0x06, 0x96, 0x38, 0xf4, 0x24, 0xcc, 0x2a, - 0x2f, 0xab, 0x96, 0x04, 0xd9, 0x82, 0x22, 0x9b, 0x8d, 0xdc, 0x20, 0xc2, 0xf3, 0xf5, 0x9d, 0xb8, - 0xbe, 0x23, 0xfc, 0x4e, 0x5b, 0xdf, 0x2b, 0xae, 0xef, 0x60, 0x81, 0x41, 0xb7, 0xa1, 0x70, 0x4a, - 0xe8, 0x21, 0xf7, 0x04, 0xee, 0x9a, 0x5f, 0xca, 0xb6, 0xd1, 0xf7, 0x38, 0x4b, 0xa3, 0xc4, 0x4d, - 0x13, 0x3f, 0xb1, 0x14, 0x82, 0xea, 0x00, 0xec, 0x38, 0xa0, 0xa1, 0x58, 0x5e, 0xb5, 0xb0, 0x96, - 0xbf, 0x5a, 0x6a, 0xcc, 0xf3, 0xf5, 0x1e, 0xc4, 0x50, 0xac, 0x51, 0x70, 0x7a, 0xdb, 0x0a, 0x49, - 0x2b, 0xa0, 0x2e, 0x61, 0xd5, 0xd9, 0x84, 0x7e, 0x2b, 0x86, 0x62, 0x8d, 0x02, 0x7d, 0x03, 0x10, - 0x0b, 0x03, 0x6a, 0xb5, 0x88, 0x5a, 0xea, 0xcb, 0x16, 0x3b, 0xae, 
0x82, 0x58, 0xdd, 0x8a, 0x5a, - 0x1d, 0x3a, 0x18, 0xa2, 0xc0, 0x23, 0xb8, 0xcc, 0xdf, 0x19, 0xb0, 0xa0, 0xf9, 0x82, 0xf0, 0xbb, - 0xeb, 0x30, 0xd7, 0xd2, 0x6e, 0x9d, 0xf2, 0x8b, 0xf8, 0xb4, 0xf5, 0x1b, 0x89, 0x53, 0x94, 0x88, - 0x40, 0x89, 0x2a, 0x49, 0x51, 0x74, 0xd9, 0xc8, 0xec, 0xb4, 0x91, 0x0d, 0x89, 0x26, 0x0d, 0xc8, - 0x70, 0x22, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0x8a, 0x37, 0xe8, 0xaa, 0x16, 0xd3, 0x0c, 0xb1, - 0x7d, 0x73, 0x63, 0xe2, 0xd1, 0x05, 0x81, 0x20, 0xf7, 0x3f, 0x11, 0x08, 0x6e, 0x14, 0x7f, 0xf9, - 0x5e, 0x6d, 0xea, 0xed, 0xbf, 0xad, 0x4d, 0x99, 0xbf, 0x30, 0x60, 0x6e, 0xb3, 0xd3, 0xf1, 0x7a, - 0x7b, 0x9d, 0x50, 0x2c, 0xc0, 0x84, 0x19, 0x87, 0xf6, 0x70, 0xd7, 0x57, 0x0b, 0x05, 0x7e, 0xbf, - 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0xa3, 0x80, 0xda, 0x44, 0x5d, 0xb7, 0xf8, 0xfe, 0x6c, - 0x73, 0x20, 0x96, 0x38, 0x7e, 0xc8, 0x47, 0x2e, 0xf1, 0x9c, 0x5d, 0xcb, 0xb7, 0x5a, 0x84, 0xaa, - 0xcb, 0x11, 0x6f, 0xfd, 0xb6, 0x86, 0xc3, 0x29, 0x4a, 0xf3, 0xdf, 0x39, 0x28, 0x6d, 0x05, 0xbe, - 0xe3, 0x86, 0xea, 0x72, 0x85, 0xbd, 0xce, 0x50, 0xf0, 0xb8, 0xd3, 0xeb, 0x10, 0x2c, 0x30, 0xe8, - 0x79, 0x98, 0x61, 0xa1, 0x15, 0x76, 0x99, 0xb0, 0xa7, 0xd4, 0x78, 0x2c, 0x0a, 0x4b, 0x07, 0x02, - 0x7a, 0xde, 0xaf, 0x2d, 0xc4, 0xe2, 0x24, 0x08, 0x2b, 0x06, 0xee, 0xe9, 0xc1, 0xa1, 0xd8, 0x28, - 0xe7, 0x96, 0x4c, 0x7b, 0x51, 0xfe, 0xc8, 0x27, 0x9e, 0xbe, 0x37, 0x44, 0x81, 0x47, 0x70, 0xa1, - 0x53, 0x40, 0x9e, 0xc5, 0xc2, 0x3b, 0xd4, 0xf2, 0x99, 0xd0, 0x75, 0xc7, 0x6d, 0x13, 0x75, 0xe1, - 0xbf, 0x98, 0xed, 0xc4, 0x39, 0x47, 0xa2, 0xf7, 0xf6, 0x90, 0x34, 0x3c, 0x42, 0x03, 0x7a, 0x02, - 0x66, 0x28, 0xb1, 0x58, 0xe0, 0x57, 0x0b, 0x62, 0xf9, 0x71, 0x54, 0xc6, 0x02, 0x8a, 0x15, 0x96, - 0x07, 0xb4, 0x36, 0x61, 0xcc, 0x6a, 0x45, 0xe1, 0x35, 0x0e, 0x68, 0xbb, 0x12, 0x8c, 0x23, 0xbc, - 0xf9, 0x5b, 0x03, 0x2a, 0x5b, 0x94, 0x58, 0x21, 0x99, 0xc4, 0x2d, 0x3e, 0xf5, 0x89, 0xa3, 0x4d, - 0x58, 0x10, 0xdf, 0xf7, 0x2c, 0xcf, 0x75, 0xe4, 0x19, 0x4c, 0x0b, 0xe6, 0xff, 0x57, 0xcc, 0x0b, - 0xdb, 0x69, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x87, 0x4a, 0x93, 0x78, 0x24, 0x31, 0x79, 0x1b, - 0x50, 0x8b, 0x5a, 0x36, 0xd9, 0x27, 0xd4, 0x0d, 0x9c, 0x03, 0x62, 0x07, 0xbe, 0xc3, 0x84, 0x1b, - 0xe5, 0x1b, 0xff, 0xc7, 0xf7, 0xf7, 0xd6, 0x10, 0x16, 0x8f, 0xe0, 0x40, 0x1e, 0x54, 0x3a, 0x54, - 0xfc, 0x16, 0x7b, 0x2e, 0xbd, 0xac, 0x7c, 0xed, 0xcb, 0xd9, 0x8e, 0x74, 0x5f, 0x67, 0x6d, 0x2c, - 0x9d, 0xf5, 0x6b, 0x95, 0x14, 0x08, 0xa7, 0x85, 0xa3, 0xaf, 0xc3, 0x62, 0x40, 0x3b, 0xc7, 0x96, - 0xdf, 0x24, 0x1d, 0xe2, 0x3b, 0xc4, 0x0f, 0x99, 0xd8, 0xc8, 0x62, 0x63, 0x99, 0xd7, 0x22, 0x7b, - 0x03, 0x38, 0x3c, 0x44, 0x8d, 0x5e, 0x83, 0xa5, 0x0e, 0x0d, 0x3a, 0x56, 0x4b, 0x6c, 0xcc, 0x7e, - 0xe0, 0xb9, 0x76, 0x4f, 0x6d, 0xe7, 0x53, 0x67, 0xfd, 0xda, 0xd2, 0xfe, 0x20, 0xf2, 0xbc, 0x5f, - 0xbb, 0x24, 0xb6, 0x8e, 0x43, 0x12, 0x24, 0x1e, 0x16, 0xa3, 0xb9, 0x41, 0x61, 0x9c, 0x1b, 0x98, - 0x3b, 0x50, 0x6c, 0x76, 0xd5, 0x9d, 0x78, 0x11, 0x8a, 0x8e, 0xfa, 0xad, 0x76, 0x3e, 0xba, 0x9c, - 0x31, 0xcd, 0x79, 0xbf, 0x56, 0xe1, 0xe5, 0x67, 0x3d, 0x02, 0xe0, 0x98, 0xc5, 0x7c, 0x02, 0x8a, - 0xe2, 0xe0, 0xd9, 0xbd, 0x0d, 0xb4, 0x08, 0x79, 0x6c, 0xdd, 0x17, 0x52, 0xe6, 0x30, 0xff, 0xa9, - 0x45, 0xb1, 0x3d, 0x80, 0x5b, 0x24, 0x8c, 0x0e, 0x7e, 0x13, 0x16, 0xa2, 0x50, 0x9e, 0xce, 0x30, - 0xb1, 0x37, 0xe1, 0x34, 0x1a, 0x0f, 0xd2, 0x9b, 0xaf, 0x43, 0x49, 0x64, 0x21, 0x9e, 0xc2, 0x93, - 0x72, 0xc1, 0x78, 0x40, 0xb9, 0x10, 0xd5, 0x00, 0xb9, 0x71, 0x35, 0x80, 0x66, 0xae, 0x07, 0x15, - 0xc9, 0x1b, 0x15, 0x48, 0x99, 0x34, 0x3c, 0x05, 0xc5, 0xc8, 0x4c, 0xa5, 0x25, 0x2e, 0x8c, 
0x23, - 0x41, 0x38, 0xa6, 0xd0, 0xb4, 0x1d, 0x43, 0x2a, 0xa3, 0x66, 0x53, 0xa6, 0x55, 0x3f, 0xb9, 0x07, - 0x57, 0x3f, 0x9a, 0xa6, 0x1f, 0x42, 0x75, 0x5c, 0x35, 0xfd, 0x10, 0x39, 0x3f, 0xbb, 0x29, 0xe6, - 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xb2, 0x1f, 0x5f, 0x76, 0x25, 0x17, 0x57, 0x7b, 0xda, 0x8e, 0xfc, - 0xca, 0x80, 0xe5, 0xd4, 0xd2, 0x26, 0x3a, 0xf1, 0x09, 0x8c, 0xd2, 0x9d, 0x23, 0x3f, 0x81, 0x73, - 0xfc, 0x25, 0x07, 0x95, 0xdb, 0xd6, 0x21, 0xf1, 0x0e, 0x88, 0x47, 0xec, 0x30, 0xa0, 0xe8, 0x07, - 0x50, 0x6e, 0x5b, 0xa1, 0x7d, 0x2c, 0xa0, 0x51, 0x67, 0xd0, 0xcc, 0x16, 0xec, 0x52, 0x92, 0xea, - 0xbb, 0x89, 0x98, 0x9b, 0x7e, 0x48, 0x7b, 0x8d, 0x4b, 0xca, 0xa4, 0xb2, 0x86, 0xc1, 0xba, 0x36, - 0xd1, 0xce, 0x89, 0xef, 0x9b, 0x6f, 0x75, 0x78, 0xd9, 0x32, 0x79, 0x17, 0x99, 0x32, 0x01, 0x93, - 0x37, 0xbb, 0x2e, 0x25, 0x6d, 0xe2, 0x87, 0x49, 0x3b, 0xb7, 0x3b, 0x20, 0x1f, 0x0f, 0x69, 0x5c, - 0x79, 0x09, 0x16, 0x07, 0x8d, 0xe7, 0xf1, 0xe7, 0x84, 0xf4, 0xe4, 0x79, 0x61, 0xfe, 0x13, 0x2d, - 0x43, 0xe1, 0xd4, 0xf2, 0xba, 0xea, 0x36, 0x62, 0xf9, 0x71, 0x23, 0x77, 0xdd, 0x30, 0x7f, 0x63, - 0x40, 0x75, 0x9c, 0x21, 0xe8, 0xf3, 0x9a, 0xa0, 0x46, 0x59, 0x59, 0x95, 0x7f, 0x85, 0xf4, 0xa4, - 0xd4, 0x9b, 0x50, 0x0c, 0x3a, 0xbc, 0xa6, 0x08, 0xa8, 0x3a, 0xf5, 0x27, 0xa3, 0x93, 0xdc, 0x53, - 0xf0, 0xf3, 0x7e, 0xed, 0x72, 0x4a, 0x7c, 0x84, 0xc0, 0x31, 0x2b, 0x8f, 0xd4, 0xc2, 0x1e, 0x9e, - 0x3d, 0xe2, 0x48, 0x7d, 0x4f, 0x40, 0xb0, 0xc2, 0x98, 0x7f, 0x30, 0x60, 0x5a, 0x14, 0xe4, 0xaf, - 0x43, 0x91, 0xef, 0x9f, 0x63, 0x85, 0x96, 0xb0, 0x2b, 0x73, 0x2b, 0xc8, 0xb9, 0x77, 0x49, 0x68, - 0x25, 0xde, 0x16, 0x41, 0x70, 0x2c, 0x11, 0x61, 0x28, 0xb8, 0x21, 0x69, 0x47, 0x07, 0xf9, 0xf4, - 0x58, 0xd1, 0x6a, 0x10, 0x51, 0xc7, 0xd6, 0xfd, 0x9b, 0x6f, 0x85, 0xc4, 0xe7, 0x87, 0x91, 0x5c, - 0x8d, 0x1d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x55, 0x71, 0xe7, 0x67, 0xc4, 0x3b, - 0xba, 0xed, 0xfa, 0x27, 0x6a, 0x5b, 0x63, 0x73, 0x0e, 0x14, 0x1c, 0xc7, 0x14, 0xa3, 0xd2, 0x43, - 0x6e, 0xb2, 0xf4, 0xc0, 0x15, 0xda, 0x81, 0x1f, 0xba, 0x7e, 0x77, 0xe8, 0xb6, 0x6d, 0x29, 0x38, - 0x8e, 0x29, 0x78, 0x21, 0x42, 0x49, 0xdb, 0x72, 0x7d, 0xd7, 0x6f, 0xf1, 0x45, 0x6c, 0x05, 0x5d, - 0x3f, 0x14, 0x19, 0x59, 0x15, 0x22, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xbf, 0x9f, 0x86, 0x32, - 0x5f, 0x73, 0x94, 0xe7, 0x5e, 0x80, 0x8a, 0xa7, 0x7b, 0x81, 0x5a, 0xfb, 0x65, 0x65, 0x4a, 0xfa, - 0x5e, 0xe3, 0x34, 0x2d, 0x67, 0x16, 0x25, 0x54, 0xcc, 0x9c, 0x4b, 0x33, 0x6f, 0xeb, 0x48, 0x9c, - 0xa6, 0xe5, 0xd1, 0xeb, 0x3e, 0xbf, 0x1f, 0xaa, 0x32, 0x89, 0x8f, 0xe8, 0x9b, 0x1c, 0x88, 0x25, - 0x0e, 0xed, 0xc2, 0x25, 0xcb, 0xf3, 0x82, 0xfb, 0x02, 0xd8, 0x08, 0x82, 0x93, 0xb6, 0x45, 0x4f, - 0x98, 0x68, 0xa6, 0x8b, 0x8d, 0xcf, 0x29, 0x96, 0x4b, 0x9b, 0xc3, 0x24, 0x78, 0x14, 0xdf, 0xa8, - 0x63, 0x9b, 0x9e, 0xf0, 0xd8, 0x8e, 0x61, 0x79, 0x00, 0x24, 0x6e, 0xb9, 0xea, 0x6c, 0x9f, 0x55, - 0x72, 0x96, 0xf1, 0x08, 0x9a, 0xf3, 0x31, 0x70, 0x3c, 0x52, 0x22, 0xba, 0x01, 0xf3, 0xdc, 0x93, - 0x83, 0x6e, 0x18, 0xd5, 0x9d, 0x05, 0x71, 0xdc, 0xe8, 0xac, 0x5f, 0x9b, 0xbf, 0x93, 0xc2, 0xe0, - 0x01, 0x4a, 0xbe, 0xb9, 0x9e, 0xdb, 0x76, 0xc3, 0xea, 0xac, 0x60, 0x89, 0x37, 0xf7, 0x36, 0x07, - 0x62, 0x89, 0x4b, 0x79, 0x60, 0xf1, 0x22, 0x0f, 0x34, 0xff, 0x9c, 0x07, 0x24, 0x6b, 0x6d, 0x47, - 0xd6, 0x53, 0x32, 0xa4, 0xf1, 0x8e, 0x40, 0xd5, 0xea, 0xc6, 0x40, 0x47, 0xa0, 0xca, 0xf4, 0x08, - 0x8f, 0x76, 0xa1, 0x24, 0x43, 0x4b, 0x72, 0x5d, 0xd6, 0x15, 0x71, 0x69, 0x2f, 0x42, 0x9c, 0xf7, - 0x6b, 0x2b, 0x29, 0x35, 0x31, 0x46, 0x74, 0x6b, 0x89, 0x04, 0x74, 0x0d, 0xc0, 0xea, 0xb8, 0xfa, - 0xbc, 0xae, 0x94, 
0x4c, 0x6d, 0x92, 0xce, 0x1b, 0x6b, 0x54, 0xe8, 0x65, 0x98, 0x0e, 0x3f, 0x5d, - 0x47, 0x55, 0x14, 0x0d, 0x23, 0xef, 0x9f, 0x84, 0x04, 0xae, 0x5d, 0xf8, 0x33, 0xe3, 0x66, 0xa9, - 0x66, 0x28, 0xd6, 0xbe, 0x1d, 0x63, 0xb0, 0x46, 0x85, 0xbe, 0x05, 0xc5, 0x23, 0x55, 0x8a, 0x8a, - 0x83, 0xc9, 0x1c, 0x22, 0xa3, 0x02, 0x56, 0x8e, 0x0c, 0xa2, 0x2f, 0x1c, 0x4b, 0x43, 0x5f, 0x81, - 0x32, 0xeb, 0x1e, 0xc6, 0xd9, 0x5b, 0x9e, 0x66, 0x9c, 0x2a, 0x0f, 0x12, 0x14, 0xd6, 0xe9, 0xcc, - 0x37, 0xa1, 0xb4, 0xeb, 0xda, 0x34, 0x10, 0x3d, 0xe0, 0x93, 0x30, 0xcb, 0x52, 0x0d, 0x4e, 0x7c, - 0x92, 0x91, 0x97, 0x45, 0x78, 0xee, 0x5e, 0xbe, 0xe5, 0x07, 0xb2, 0x8d, 0x29, 0x24, 0xee, 0xf5, - 0x2a, 0x07, 0x62, 0x89, 0xbb, 0xb1, 0xcc, 0x0b, 0x84, 0x9f, 0xbe, 0x5f, 0x9b, 0x7a, 0xf7, 0xfd, - 0xda, 0xd4, 0x7b, 0xef, 0xab, 0x62, 0xe1, 0x1c, 0x00, 0xf6, 0x0e, 0xbf, 0x47, 0x6c, 0x19, 0x76, - 0x33, 0x8d, 0xf5, 0xa2, 0x69, 0xb2, 0x18, 0xeb, 0xe5, 0x06, 0x8a, 0x3e, 0x0d, 0x87, 0x53, 0x94, - 0x68, 0x1d, 0x4a, 0xf1, 0xc0, 0x4e, 0xf9, 0xc7, 0x52, 0xe4, 0x6f, 0xf1, 0x54, 0x0f, 0x27, 0x34, - 0xa9, 0x1c, 0x30, 0x7d, 0x61, 0x0e, 0x68, 0x40, 0xbe, 0xeb, 0x3a, 0xaa, 0x61, 0x7e, 0x26, 0xca, - 0xc1, 0x77, 0x77, 0x9a, 0xe7, 0xfd, 0xda, 0x63, 0xe3, 0xe6, 0xe4, 0x61, 0xaf, 0x43, 0x58, 0xfd, - 0xee, 0x4e, 0x13, 0x73, 0xe6, 0x51, 0x01, 0x69, 0x66, 0xc2, 0x80, 0x74, 0x0d, 0xa0, 0x95, 0x8c, - 0x1d, 0xe4, 0x7d, 0x8f, 0x1d, 0x51, 0x1b, 0x37, 0x68, 0x54, 0x88, 0xc1, 0x92, 0xcd, 0x5b, 0x73, - 0xd5, 0xfe, 0xb3, 0xd0, 0x6a, 0xcb, 0x41, 0xe6, 0x64, 0x77, 0xe2, 0x8a, 0x52, 0xb3, 0xb4, 0x35, - 0x28, 0x0c, 0x0f, 0xcb, 0x47, 0x01, 0x2c, 0x39, 0xaa, 0x43, 0x4c, 0x94, 0x96, 0x26, 0x56, 0x7a, - 0x99, 0x2b, 0x6c, 0x0e, 0x0a, 0xc2, 0xc3, 0xb2, 0xd1, 0x77, 0x61, 0x25, 0x02, 0x0e, 0xb7, 0xe9, - 0x22, 0x60, 0xe7, 0x1b, 0xab, 0x67, 0xfd, 0xda, 0x4a, 0x73, 0x2c, 0x15, 0x7e, 0x80, 0x04, 0xe4, - 0xc0, 0x8c, 0x27, 0x0b, 0xdc, 0xb2, 0x28, 0x4a, 0xbe, 0x9a, 0x6d, 0x15, 0x89, 0xf7, 0xd7, 0xf5, - 0xc2, 0x36, 0x1e, 0xb9, 0xa8, 0x9a, 0x56, 0xc9, 0x46, 0x6f, 0x41, 0xd9, 0xf2, 0xfd, 0x20, 0xb4, - 0xe4, 0xe0, 0x60, 0x4e, 0xa8, 0xda, 0x9c, 0x58, 0xd5, 0x66, 0x22, 0x63, 0xa0, 0x90, 0xd6, 0x30, - 0x58, 0x57, 0x85, 0xee, 0xc3, 0x42, 0x70, 0xdf, 0x27, 0x14, 0x93, 0x23, 0x42, 0x89, 0x6f, 0x13, - 0x56, 0xad, 0x08, 0xed, 0xcf, 0x66, 0xd4, 0x9e, 0x62, 0x4e, 0x5c, 0x3a, 0x0d, 0x67, 0x78, 0x50, - 0x0b, 0xaa, 0xf3, 0xd8, 0xea, 0x5b, 0x9e, 0xfb, 0x7d, 0x42, 0x59, 0x75, 0x3e, 0x99, 0x35, 0x6f, - 0xc7, 0x50, 0xac, 0x51, 0xf0, 0xe8, 0x67, 0x7b, 0x5d, 0x16, 0x12, 0x39, 0xf8, 0x5f, 0x48, 0x47, - 0xbf, 0xad, 0x04, 0x85, 0x75, 0x3a, 0xd4, 0x85, 0x4a, 0x5b, 0xcf, 0x34, 0xd5, 0x25, 0xb1, 0xba, - 0xeb, 0xd9, 0x56, 0x37, 0x9c, 0x0b, 0x93, 0xc2, 0x27, 0x85, 0xc3, 0x69, 0x2d, 0x2b, 0xcf, 0x43, - 0xf9, 0x53, 0xf6, 0x04, 0xbc, 0xa7, 0x18, 0x3c, 0xc7, 0x89, 0x7a, 0x8a, 0x3f, 0xe6, 0x60, 0x3e, - 0xbd, 0xfb, 0x03, 0x59, 0xb4, 0x90, 0x29, 0x8b, 0x46, 0xdd, 0xab, 0x31, 0xf6, 0xad, 0x22, 0x0a, - 0xeb, 0xf9, 0xb1, 0x61, 0x5d, 0x45, 0xcf, 0xe9, 0x87, 0x89, 0x9e, 0x75, 0x00, 0x5e, 0x9e, 0xd0, - 0xc0, 0xf3, 0x08, 0x15, 0x81, 0xb3, 0xa8, 0xde, 0x24, 0x62, 0x28, 0xd6, 0x28, 0x78, 0x11, 0x7d, - 0xe8, 0x05, 0xf6, 0x89, 0xd8, 0x82, 0xe8, 0xd2, 0x8b, 0x90, 0x59, 0x94, 0x45, 0x74, 0x63, 0x08, - 0x8b, 0x47, 0x70, 0x98, 0x3d, 0xb8, 0xbc, 0x6f, 0xd1, 0xd0, 0xb5, 0xbc, 0xe4, 0x82, 0x89, 0x2e, - 0xe5, 0x8d, 0xa1, 0x1e, 0xe8, 0x99, 0x49, 0x2f, 0x6a, 0xb2, 0xf9, 0x09, 0x2c, 0xe9, 0x83, 0xcc, - 0xbf, 0x1a, 0x70, 0x65, 0xa4, 0xee, 0xcf, 0xa0, 0x07, 0x7b, 0x23, 0xdd, 0x83, 0xbd, 0x90, 0x71, - 0x78, 0x39, 0xca, 0xda, 0x31, 0x1d, 0xd9, 
0x2c, 0x14, 0xf6, 0x79, 0xed, 0x6b, 0x7e, 0x68, 0xc0, - 0x9c, 0xf8, 0x35, 0xc9, 0xec, 0xb8, 0x96, 0x7e, 0x52, 0x28, 0x3d, 0xba, 0xe7, 0x84, 0x47, 0x31, - 0x5c, 0x7e, 0xc7, 0x80, 0xf4, 0xd4, 0x16, 0xbd, 0x24, 0xaf, 0x80, 0x11, 0x8f, 0x55, 0x27, 0x74, - 0xff, 0x17, 0xc7, 0x35, 0xa1, 0x97, 0x32, 0xcd, 0x27, 0x9f, 0x82, 0x12, 0x0e, 0x82, 0x70, 0xdf, - 0x0a, 0x8f, 0x19, 0xdf, 0xbb, 0x0e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, - 0x7f, 0x6e, 0xc0, 0x95, 0xb1, 0x2f, 0x45, 0x3c, 0x8a, 0xd8, 0xf1, 0x97, 0x5a, 0x51, 0xec, 0xc8, - 0x09, 0x1d, 0xd6, 0xa8, 0x78, 0xf7, 0x98, 0x7a, 0x5e, 0x1a, 0xec, 0x1e, 0x53, 0xda, 0x70, 0x9a, - 0xd6, 0xfc, 0x67, 0x0e, 0xd4, 0xd3, 0xcc, 0x7f, 0xd9, 0xe9, 0x9f, 0x18, 0x78, 0x18, 0x9a, 0x4f, - 0x3f, 0x0c, 0xc5, 0xaf, 0x40, 0xda, 0xcb, 0x48, 0xfe, 0xc1, 0x2f, 0x23, 0xe8, 0xb9, 0xf8, 0xb1, - 0x45, 0xfa, 0xd0, 0x6a, 0xfa, 0xb1, 0xe5, 0xbc, 0x5f, 0x9b, 0x53, 0xc2, 0xd3, 0x8f, 0x2f, 0xaf, - 0xc1, 0xac, 0x43, 0x42, 0xcb, 0xf5, 0x64, 0x27, 0x98, 0xf9, 0xf9, 0x40, 0x0a, 0x6b, 0x4a, 0xd6, - 0x46, 0x99, 0xdb, 0xa4, 0x3e, 0x70, 0x24, 0x90, 0x07, 0x6c, 0x3b, 0x70, 0x64, 0x23, 0x53, 0x48, - 0x02, 0xf6, 0x56, 0xe0, 0x10, 0x2c, 0x30, 0xe6, 0xbb, 0x06, 0x94, 0xa5, 0xa4, 0x2d, 0xab, 0xcb, - 0x08, 0xda, 0x88, 0x57, 0x21, 0x8f, 0xfb, 0x8a, 0xfe, 0xaa, 0x76, 0xde, 0xaf, 0x95, 0x04, 0x99, - 0xe8, 0x81, 0x46, 0xbc, 0x1e, 0xe5, 0x2e, 0xd8, 0xa3, 0xc7, 0xa1, 0x20, 0x2e, 0x90, 0xda, 0xcc, - 0xe4, 0x79, 0x90, 0x03, 0xb1, 0xc4, 0x99, 0x1f, 0xe7, 0xa0, 0x92, 0x5a, 0x5c, 0x86, 0x76, 0x22, - 0x1e, 0x9a, 0xe6, 0x32, 0x0c, 0xe2, 0xc7, 0x3f, 0xc6, 0xab, 0xf4, 0x35, 0xf3, 0x30, 0xe9, 0xeb, - 0xdb, 0x30, 0x63, 0xf3, 0x3d, 0x8a, 0xfe, 0xdb, 0xb1, 0x31, 0xc9, 0x71, 0x8a, 0xdd, 0x4d, 0xbc, - 0x51, 0x7c, 0x32, 0xac, 0x04, 0xa2, 0x5b, 0xb0, 0x44, 0x49, 0x48, 0x7b, 0x9b, 0x47, 0x21, 0xa1, - 0xfa, 0xf8, 0xa0, 0x90, 0x14, 0xed, 0x78, 0x90, 0x00, 0x0f, 0xf3, 0x98, 0x87, 0x30, 0x77, 0xc7, - 0x3a, 0xf4, 0xe2, 0x07, 0x31, 0x0c, 0x15, 0xd7, 0xb7, 0xbd, 0xae, 0x43, 0x64, 0x40, 0x8f, 0xa2, - 0x57, 0x74, 0x69, 0x77, 0x74, 0xe4, 0x79, 0xbf, 0x76, 0x29, 0x05, 0x90, 0x2f, 0x40, 0x38, 0x2d, - 0xc2, 0xf4, 0x60, 0xfa, 0x33, 0x6c, 0x40, 0xbf, 0x03, 0xa5, 0xa4, 0x45, 0x78, 0xc4, 0x2a, 0xcd, - 0x37, 0xa0, 0xc8, 0x3d, 0x3e, 0x6a, 0x6d, 0x2f, 0xa8, 0x92, 0xd2, 0xb5, 0x57, 0x2e, 0x4b, 0xed, - 0x25, 0x9e, 0x55, 0xef, 0x76, 0x9c, 0x87, 0x7c, 0x56, 0xcd, 0x3d, 0x4c, 0xe6, 0xcb, 0x4f, 0x98, - 0xf9, 0xae, 0x81, 0xfc, 0xeb, 0x09, 0x4f, 0x32, 0xb2, 0x80, 0xd0, 0x92, 0x8c, 0x9e, 0xff, 0xb5, - 0x37, 0x85, 0x1f, 0x1b, 0x00, 0x62, 0x78, 0x77, 0xf3, 0x94, 0xf8, 0x61, 0x86, 0x07, 0xfc, 0xbb, - 0x30, 0x13, 0x48, 0x8f, 0x94, 0x4f, 0xab, 0x13, 0x4e, 0x88, 0xe3, 0x8b, 0x24, 0x7d, 0x12, 0x2b, - 0x61, 0x8d, 0xab, 0x1f, 0x7c, 0xb2, 0x3a, 0xf5, 0xe1, 0x27, 0xab, 0x53, 0x1f, 0x7d, 0xb2, 0x3a, - 0xf5, 0xf6, 0xd9, 0xaa, 0xf1, 0xc1, 0xd9, 0xaa, 0xf1, 0xe1, 0xd9, 0xaa, 0xf1, 0xd1, 0xd9, 0xaa, - 0xf1, 0xf1, 0xd9, 0xaa, 0xf1, 0xee, 0xdf, 0x57, 0xa7, 0x5e, 0xcb, 0x9d, 0x6e, 0xfc, 0x27, 0x00, - 0x00, 0xff, 0xff, 0x7e, 0xef, 0x1e, 0xdd, 0xf0, 0x27, 0x00, 0x00, + proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_a8431b6e0aeeb761) +} + +var fileDescriptor_a8431b6e0aeeb761 = []byte{ + // 2873 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57, + 0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa, + 0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 
0xd3, 0x44, + 0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49, + 0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f, + 0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84, + 0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7, + 0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e, + 0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26, + 0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03, + 0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5, + 0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf, + 0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b, + 0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e, + 0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76, + 0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f, + 0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28, + 0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26, + 0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28, + 0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29, + 0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f, + 0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, + 0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d, + 0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86, + 0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5, + 0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, + 0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e, + 0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a, + 0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb, + 0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71, + 0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53, + 0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7, + 0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, + 0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5, + 0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95, + 0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b, + 0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8, + 0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75, + 0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b, + 0xc2, 0xef, 
0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09, + 0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, + 0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3, + 0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3, + 0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74, + 0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81, + 0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2, + 0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22, + 0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1, + 0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60, + 0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6, + 0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1, + 0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82, + 0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8, + 0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, + 0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56, + 0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e, + 0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47, + 0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc, + 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79, + 0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13, + 0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77, + 0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a, + 0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e, + 0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45, + 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98, + 0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3, + 0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e, + 0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18, + 0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d, + 0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69, + 0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91, + 0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54, + 0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63, + 0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b, + 0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92, + 0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 
0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23, + 0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57, + 0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b, + 0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a, + 0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f, + 0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c, + 0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f, + 0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e, + 0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41, + 0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37, + 0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf, + 0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73, + 0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15, + 0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88, + 0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69, + 0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7, + 0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d, + 0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7, + 0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7, + 0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14, + 0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1, + 0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17, + 0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc, + 0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f, + 0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98, + 0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b, + 0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51, + 0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c, + 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a, + 0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3, + 0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e, + 0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd, + 0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf, + 0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b, + 0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65, + 0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6, + 0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 
0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5, + 0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59, + 0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc, + 0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17, + 0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6, + 0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75, + 0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43, + 0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86, + 0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c, + 0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53, + 0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54, + 0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, + 0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2, + 0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91, + 0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77, + 0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4, + 0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1, + 0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0, + 0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8, + 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8, + 0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30, + 0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a, + 0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39, + 0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb, + 0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95, + 0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0, + 0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04, + 0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66, + 0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d, + 0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0, + 0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1, + 0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09, + 0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd, + 0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70, + 0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a, + 0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2, + 0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 
0x94, 0xb5, + 0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9, + 0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80, + 0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a, + 0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef, + 0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2, + 0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, + 0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06, + 0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f, + 0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a, + 0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81, + 0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52, + 0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f, + 0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad, + 0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78, + 0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03, + 0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52, + 0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a, + 0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19, + 0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90, + 0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8, + 0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda, + 0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98, + 0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73, + 0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0, + 0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82, + 0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc, + 0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f, + 0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f, + 0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31, + 0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31, + 0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2025,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorRequirement) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2504,6 +2576,16 @@ func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SendInitialEvents != nil { + i-- + if *m.SendInitialEvents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } i -= len(m.ResourceVersionMatch) copy(dAtA[i:], m.ResourceVersionMatch) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersionMatch))) @@ -2664,11 +2746,6 @@ func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x8a } } - i -= len(m.ClusterName) - copy(dAtA[i:], m.ClusterName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) - i-- - dAtA[i] = 0x7a if len(m.Finalizers) > 0 { for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Finalizers[iNdEx]) @@ -3709,6 +3786,25 @@ func (m *Duration) Size() (n int) { return n } +func (m *FieldSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *FieldsV1) Size() (n int) { if m == nil { return 0 @@ -3914,6 +4010,9 @@ func (m *ListOptions) Size() (n int) { n += 2 l = len(m.ResourceVersionMatch) n += 1 + l + sovGenerated(uint64(l)) + if m.SendInitialEvents != nil { + n += 2 + } return n } @@ -4000,8 +4099,6 @@ func (m *ObjectMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.ClusterName) - n += 1 + l + sovGenerated(uint64(l)) if len(m.ManagedFields) > 0 { for _, e := range m.ManagedFields { l = e.Size() @@ -4423,6 +4520,18 @@ func (this *Duration) String() string { }, "") return s } +func (this *FieldSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *GetOptions) String() string { if this == nil { return "nil" @@ -4525,6 +4634,7 @@ func (this *ListOptions) String() string { `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `ResourceVersionMatch:` + fmt.Sprintf("%v", this.ResourceVersionMatch) + `,`, + `SendInitialEvents:` + valueToStringGenerated(this.SendInitialEvents) + `,`, `}`, }, "") return s @@ -4594,7 +4704,6 @@ func (this *ObjectMeta) String() string { `Annotations:` + mapStringForAnnotations + `,`, `OwnerReferences:` 
+ repeatedStringForOwnerReferences + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, - `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, `ManagedFields:` + repeatedStringForManagedFields + `,`, `}`, }, "") @@ -6437,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx 
:= 0 @@ -8259,6 +8514,27 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } m.ResourceVersionMatch = ResourceVersionMatch(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendInitialEvents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SendInitialEvents = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9211,38 +9487,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index da8f61447..18dd0b067 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -25,7 +25,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1"; +option go_package = "k8s.io/apimachinery/pkg/apis/meta/v1"; // APIGroup contains the name, the supported versions, and the preferred version // of a group. @@ -34,6 +34,7 @@ message APIGroup { optional string name = 1; // versions are the versions supported in this group. + // +listType=atomic repeated GroupVersionForDiscovery versions = 2; // preferredVersion is the version preferred by the API server, which @@ -49,6 +50,7 @@ message APIGroup { // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. // +optional + // +listType=atomic repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; } @@ -56,6 +58,7 @@ message APIGroup { // /apis. message APIGroupList { // groups is a list of APIGroup. + // +listType=atomic repeated APIGroup groups = 1; } @@ -88,9 +91,11 @@ message APIResource { optional Verbs verbs = 4; // shortNames is a list of suggested short names of the resource. + // +listType=atomic repeated string shortNames = 5; // categories is a list of the grouped resources this resource belongs to (e.g. 
'all') + // +listType=atomic repeated string categories = 7; // The hash value of the storage version, the version this resource is @@ -112,6 +117,7 @@ message APIResourceList { optional string groupVersion = 1; // resources contains the name of the resources and if they are namespaced. + // +listType=atomic repeated APIResource resources = 2; } @@ -122,6 +128,7 @@ message APIResourceList { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { // versions are the api versions that are available. + // +listType=atomic repeated string versions = 1; // a map of client CIDR to server address that is serving this group. @@ -131,6 +138,7 @@ message APIVersions { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +listType=atomic repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; } @@ -145,6 +153,7 @@ message ApplyOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // Force is going to "force" Apply requests. It means user will @@ -235,6 +244,7 @@ message CreateOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // fieldManager is a name associated with the actor or entity @@ -244,16 +254,22 @@ message CreateOptions { // +optional optional string fieldManager = 3; - // fieldValidation determines how the server should respond to - // unknown/duplicate fields in the object in the request. - // Introduced as alpha in 1.23, older servers or servers with the - // `ServerSideFieldValidation` feature disabled will discard valid values - // specified in this param and not perform any server side field validation. - // Valid values are: - // - Ignore: ignores unknown/duplicate fields. - // - Warn: responds with a warning for each - // unknown/duplicate field, but successfully serves the request. - // - Strict: fails the request on unknown/duplicate fields. + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. // +optional optional string fieldValidation = 4; } @@ -297,6 +313,7 @@ message DeleteOptions { // request. 
Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 5; } @@ -307,6 +324,25 @@ message Duration { optional int64 duration = 1; } +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message FieldSelectorRequirement { + // key is the field selector key that the requirement applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + optional string operator = 2; + + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + repeated string values = 3; +} + // FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. // // Each key is either a '.' representing the field itself, and will always map to an empty set, @@ -412,6 +448,7 @@ message LabelSelector { // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +optional + // +listType=atomic repeated LabelSelectorRequirement matchExpressions = 2; } @@ -419,8 +456,6 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. @@ -432,6 +467,7 @@ message LabelSelectorRequirement { // the values array must be empty. This array is replaced during a strategic // merge patch. // +optional + // +listType=atomic repeated string values = 3; } @@ -443,19 +479,13 @@ message List { optional ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // ListMeta describes metadata that synthetic resources must have, including lists and // various status objects. A resource may have only one of {ObjectMeta, ListMeta}. message ListMeta { - // selfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. + // Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. // +optional optional string selfLink = 1; @@ -572,6 +602,32 @@ message ListOptions { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. 
+ // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + optional bool sendInitialEvents = 11; } // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource @@ -590,7 +646,11 @@ message ManagedFieldsEntry { // set because it cannot be automatically converted. optional string apiVersion = 3; - // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' + // Time is the timestamp of when the ManagedFields entry was added. The + // timestamp will also be updated if a field is added, the manager + // changes any of the owned fields value or removes a field. The + // timestamp does not update when a field is removed from the entry + // because another manager took it over. // +optional optional Time time = 4; @@ -638,7 +698,7 @@ message ObjectMeta { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional optional string name = 1; @@ -650,10 +710,7 @@ message ObjectMeta { // and may be truncated by the length of the suffix required to make the value // unique on the server. // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). + // If this field is specified and the generated name exists, the server will return a 409. // // Applied only if Name is not specified. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency @@ -667,17 +724,11 @@ message ObjectMeta { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional optional string namespace = 3; - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. + // Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. // +optional optional string selfLink = 4; @@ -687,7 +738,7 @@ message ObjectMeta { // // Populated by the system. // Read-only. 
- // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 5; @@ -751,14 +802,14 @@ message ObjectMeta { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional map labels = 11; // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional map annotations = 12; @@ -769,6 +820,8 @@ message ObjectMeta { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid repeated OwnerReference ownerReferences = 13; // Must be empty before the object is deleted from the registry. Each entry @@ -786,14 +839,9 @@ message ObjectMeta { // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge + // +listType=set repeated string finalizers = 14; - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - // +optional - optional string clusterName = 15; - // ManagedFields maps workflow-id and version to the set of fields // that are managed by that workflow. This is mostly for internal // housekeeping, and users typically shouldn't need to set or @@ -803,6 +851,7 @@ message ObjectMeta { // workflow used when modifying the object. // // +optional + // +listType=atomic repeated ManagedFieldsEntry managedFields = 17; } @@ -819,11 +868,11 @@ message OwnerReference { optional string kind = 1; // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names optional string name = 3; // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids optional string uid = 4; // If true, this reference points to the managing controller. @@ -833,6 +882,8 @@ message OwnerReference { // If true, AND if the owner has the "foregroundDeletion" finalizer, then // the owner cannot be deleted from the key-value store until this // reference is removed. + // See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + // for how the garbage collector interacts with this field and enforces the foreground deletion. // Defaults to false. // To set this field, a user needs "delete" permission of the owner, // otherwise 422 (Unprocessable Entity) will be returned. @@ -875,6 +926,7 @@ message PatchOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // Force is going to "force" Apply requests. 
It means user will @@ -893,16 +945,22 @@ message PatchOptions { // +optional optional string fieldManager = 3; - // fieldValidation determines how the server should respond to - // unknown/duplicate fields in the object in the request. - // Introduced as alpha in 1.23, older servers or servers with the - // `ServerSideFieldValidation` feature disabled will discard valid values - // specified in this param and not perform any server side field validation. - // Valid values are: - // - Ignore: ignores unknown/duplicate fields. - // - Warn: responds with a warning for each - // unknown/duplicate field, but successfully serves the request. - // - Strict: fails the request on unknown/duplicate fields. + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. // +optional optional string fieldValidation = 4; } @@ -922,6 +980,7 @@ message Preconditions { // For example: "/healthz", "/apis". message RootPaths { // paths are the paths available at root. + // +listType=atomic repeated string paths = 1; } @@ -964,6 +1023,7 @@ message Status { // is not guaranteed to conform to any schema except that defined by // the reason type. // +optional + // +listType=atomic optional StatusDetails details = 5; // Suggested HTTP return code for this status, 0 if not set. @@ -1021,13 +1081,14 @@ message StatusDetails { // UID of the resource. // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 6; // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional + // +listType=atomic repeated StatusCause causes = 4; // If specified, the time in seconds before the operation should be retried. Some errors may indicate @@ -1114,6 +1175,7 @@ message UpdateOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // fieldManager is a name associated with the actor or entity @@ -1123,16 +1185,22 @@ message UpdateOptions { // +optional optional string fieldManager = 2; - // fieldValidation determines how the server should respond to - // unknown/duplicate fields in the object in the request. 
- // Introduced as alpha in 1.23, older servers or servers with the - // `ServerSideFieldValidation` feature disabled will discard valid values - // specified in this param and not perform any server side field validation. - // Valid values are: - // - Ignore: ignores unknown/duplicate fields. - // - Warn: responds with a warning for each - // unknown/duplicate field, but successfully serves the request. - // - Strict: fails the request on unknown/duplicate fields. + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. // +optional optional string fieldValidation = 3; } @@ -1160,6 +1228,6 @@ message WatchEvent { // * If Type is Deleted: the state of the object immediately before deletion. // * If Type is Error: *Status is recommended; other types may make sense // depending on context. - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 2b6a30b65..c748071ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" + utiljson "k8s.io/apimachinery/pkg/util/json" ) // LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements @@ -58,7 +60,7 @@ func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { case LabelSelectorOpDoesNotExist: op = selection.DoesNotExist default: - return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + return nil, fmt.Errorf("%q is not a valid label selector operator", expr.Operator) } r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) if err != nil { @@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) { if f.Raw == nil { return []byte("null"), nil } + if f.getContentType() == fieldsV1InvalidOrValidCBORObject { + var u map[string]interface{} + if err := cbor.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err) + } + return utiljson.Marshal(u) + } return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler func (f *FieldsV1) UnmarshalJSON(b []byte) error { if f == nil { - return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + 
return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer") } if !bytes.Equal(b, []byte("null")) { f.Raw = append(f.Raw[0:0], b...) @@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error { var _ json.Marshaler = FieldsV1{} var _ json.Unmarshaler = &FieldsV1{} + +func (f FieldsV1) MarshalCBOR() ([]byte, error) { + if f.Raw == nil { + return cbor.Marshal(nil) + } + if f.getContentType() == fieldsV1InvalidOrValidJSONObject { + var u map[string]interface{} + if err := utiljson.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err) + } + return cbor.Marshal(u) + } + return f.Raw, nil +} + +var cborNull = []byte{0xf6} + +func (f *FieldsV1) UnmarshalCBOR(b []byte) error { + if f == nil { + return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(b, cborNull) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil +} + +const ( + // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw + // bytes don't represent an allowable value in any supported encoding. + fieldsV1InvalidOrEmpty = iota + + // fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that + // are a valid JSON encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidJSONObject + + // fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that + // are a valid CBOR encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidCBORObject +) + +// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject, +// fieldsV1InvalidOrValidCBORObject based on the value of Raw. +// +// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map) +// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty +// and represents an allowable value, then the initial byte unambiguously distinguishes a +// JSON-encoded value from a CBOR-encoded value. +// +// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first +// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid +// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map", +// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or +// 0xc6...0xdb). The two sets of valid initial bytes don't intersect. 
+func (f FieldsV1) getContentType() int { + if len(f.Raw) > 0 { + p := f.Raw[0] + switch p { + case 'n', '{', '\t', '\r', '\n', ' ': + return fieldsV1InvalidOrValidJSONObject + case 0xf6: // null + return fieldsV1InvalidOrValidCBORObject + default: + if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ { + return fieldsV1InvalidOrValidCBORObject + } + } + } + return fieldsV1InvalidOrEmpty +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 2002f91b0..92d3ed5e0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -59,8 +59,6 @@ type Object interface { SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference SetOwnerReferences([]OwnerReference) - GetClusterName() string - SetClusterName(clusterName string) GetManagedFields() []ManagedFieldsEntry SetManagedFields(managedFields []ManagedFieldsEntry) } @@ -172,8 +170,6 @@ func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return m func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { meta.OwnerReferences = references } -func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } -func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields } func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) { meta.ManagedFields = managedFields diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 8eb37f436..9f302b3f3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error { return nil } +func (t *MicroTime) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(RFC3339Micro, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *MicroTime) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } +func (t MicroTime) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + return cbor.Marshal(t.UTC().Format(RFC3339Micro)) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
// diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 421770d43..0333cfdb3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) // Time is a wrapper around time.Time which supports correct @@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error { return nil } +func (t *Time) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(time.RFC3339, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *Time) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +func (t Time) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + + return cbor.Marshal(t.UTC().Format(time.RFC3339)) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (t Time) ToUnstructured() interface{} { if t.IsZero() { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 35f9eca13..473adb9ef 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -59,13 +59,7 @@ type TypeMeta struct { // ListMeta describes metadata that synthetic resources must have, including lists and // various status objects. A resource may have only one of {ObjectMeta, ListMeta}. type ListMeta struct { - // selfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. + // Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` @@ -120,7 +114,7 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -132,10 +126,7 @@ type ObjectMeta struct { // and may be truncated by the length of the suffix required to make the value // unique on the server. // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). + // If this field is specified and the generated name exists, the server will return a 409. // // Applied only if Name is not specified. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency @@ -149,17 +140,11 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. + // Deprecated: selfLink is a legacy read-only field that is no longer populated by the system. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` @@ -169,7 +154,7 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` @@ -233,14 +218,14 @@ type ObjectMeta struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` @@ -251,6 +236,8 @@ type ObjectMeta struct { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` // Must be empty before the object is deleted from the registry. Each entry @@ -268,13 +255,12 @@ type ObjectMeta struct { // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge + // +listType=set Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - // +optional - ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` + // Tombstone: ClusterName was a legacy field that was always cleared by + // the system and never used. 
+ // ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` // ManagedFields maps workflow-id and version to the set of fields // that are managed by that workflow. This is mostly for internal @@ -285,6 +271,7 @@ type ObjectMeta struct { // workflow used when modifying the object. // // +optional + // +listType=atomic ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } @@ -312,10 +299,10 @@ type OwnerReference struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // If true, this reference points to the managing controller. // +optional @@ -323,6 +310,8 @@ type OwnerReference struct { // If true, AND if the owner has the "foregroundDeletion" finalizer, then // the owner cannot be deleted from the key-value store until this // reference is removed. + // See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + // for how the garbage collector interacts with this field and enforces the foreground deletion. // Defaults to false. // To set this field, a user needs "delete" permission of the owner, // otherwise 422 (Unprocessable Entity) will be returned. @@ -415,8 +404,43 @@ type ListOptions struct { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. 
+ // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + SendInitialEvents *bool `json:"sendInitialEvents,omitempty" protobuf:"varint,11,opt,name=sendInitialEvents"` } +const ( + // InitialEventsAnnotationKey the name of the key + // under which an annotation marking the end of + // a watchlist stream is stored. + // + // The annotation is added to a "Bookmark" event. + InitialEventsAnnotationKey = "k8s.io/initial-events-end" +) + // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch // may only be set if resourceVersion is also set. // @@ -520,6 +544,7 @@ type DeleteOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } @@ -545,6 +570,7 @@ type CreateOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // +k8s:deprecated=includeUninitialized,protobuf=2 @@ -555,16 +581,22 @@ type CreateOptions struct { // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` - // fieldValidation determines how the server should respond to - // unknown/duplicate fields in the object in the request. - // Introduced as alpha in 1.23, older servers or servers with the - // `ServerSideFieldValidation` feature disabled will discard valid values - // specified in this param and not perform any server side field validation. - // Valid values are: - // - Ignore: ignores unknown/duplicate fields. - // - Warn: responds with a warning for each - // unknown/duplicate field, but successfully serves the request. - // - Strict: fails the request on unknown/duplicate fields. + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. // +optional FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,4,name=fieldValidation"` } @@ -583,6 +615,7 @@ type PatchOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // Force is going to "force" Apply requests. 
It means user will @@ -601,16 +634,22 @@ type PatchOptions struct { // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` - // fieldValidation determines how the server should respond to - // unknown/duplicate fields in the object in the request. - // Introduced as alpha in 1.23, older servers or servers with the - // `ServerSideFieldValidation` feature disabled will discard valid values - // specified in this param and not perform any server side field validation. - // Valid values are: - // - Ignore: ignores unknown/duplicate fields. - // - Warn: responds with a warning for each - // unknown/duplicate field, but successfully serves the request. - // - Strict: fails the request on unknown/duplicate fields. + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. // +optional FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,4,name=fieldValidation"` } @@ -628,6 +667,7 @@ type ApplyOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // Force is going to "force" Apply requests. It means user will @@ -660,6 +700,7 @@ type UpdateOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // fieldManager is a name associated with the actor or entity @@ -669,16 +710,22 @@ type UpdateOptions struct { // +optional FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` - // fieldValidation determines how the server should respond to - // unknown/duplicate fields in the object in the request. - // Introduced as alpha in 1.23, older servers or servers with the - // `ServerSideFieldValidation` feature disabled will discard valid values - // specified in this param and not perform any server side field validation. - // Valid values are: - // - Ignore: ignores unknown/duplicate fields. - // - Warn: responds with a warning for each - // unknown/duplicate field, but successfully serves the request. - // - Strict: fails the request on unknown/duplicate fields. + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. 
Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. // +optional FieldValidation string `json:"fieldValidation,omitempty" protobuf:"bytes,3,name=fieldValidation"` } @@ -722,6 +769,7 @@ type Status struct { // is not guaranteed to conform to any schema except that defined by // the reason type. // +optional + // +listType=atomic Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` // Suggested HTTP return code for this status, 0 if not set. // +optional @@ -749,12 +797,13 @@ type StatusDetails struct { Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional + // +listType=atomic Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` // If specified, the time in seconds before the operation should be retried. Some errors may indicate // the client must take an alternate action - for those errors this field may indicate how long to wait @@ -966,6 +1015,24 @@ const ( // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) // values that can not be handled (e.g. an enumerated string). CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + CauseTypeForbidden CauseType = "FieldValueForbidden" + // CauseTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + CauseTypeTooLong CauseType = "FieldValueTooLong" + // CauseTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + CauseTypeTooMany CauseType = "FieldValueTooMany" + // CauseTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). 
+ CauseTypeInternal CauseType = "InternalError" + // CauseTypeTypeInvalid is for the value did not match the schema type for that field + CauseTypeTypeInvalid CauseType = "FieldValueTypeInvalid" // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client // without the expected return type. The presence of this cause indicates the error may be // due to an intervening proxy or the server software malfunctioning. @@ -1000,6 +1067,7 @@ type List struct { type APIVersions struct { TypeMeta `json:",inline"` // versions are the api versions that are available. + // +listType=atomic Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` // a map of client CIDR to server address that is serving this group. // This is to help clients reach servers in the most network-efficient way possible. @@ -1008,6 +1076,7 @@ type APIVersions struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +listType=atomic ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` } @@ -1018,6 +1087,7 @@ type APIVersions struct { type APIGroupList struct { TypeMeta `json:",inline"` // groups is a list of APIGroup. + // +listType=atomic Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` } @@ -1030,6 +1100,7 @@ type APIGroup struct { // name is the name of the group. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // versions are the versions supported in this group. + // +listType=atomic Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` // preferredVersion is the version preferred by the API server, which // probably is the storage version. @@ -1043,6 +1114,7 @@ type APIGroup struct { // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. // +optional + // +listType=atomic ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` } @@ -1087,8 +1159,10 @@ type APIResource struct { // update, patch, delete, deletecollection, and proxy) Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` // shortNames is a list of suggested short names of the resource. + // +listType=atomic ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + // +listType=atomic Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` // The hash value of the storage version, the version this resource is // converted to when written to the data store. Value must be treated @@ -1121,6 +1195,7 @@ type APIResourceList struct { // groupVersion is the group and version this APIResourceList is for. GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` // resources contains the name of the resources and if they are namespaced. 
+ // +listType=atomic APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` } @@ -1128,6 +1203,7 @@ type APIResourceList struct { // For example: "/healthz", "/apis". type RootPaths struct { // paths are the paths available at root. + // +listType=atomic Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -1171,6 +1247,7 @@ type LabelSelector struct { MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +optional + // +listType=atomic MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` } @@ -1178,9 +1255,7 @@ type LabelSelector struct { // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` @@ -1189,6 +1264,7 @@ type LabelSelectorRequirement struct { // the values array must be empty. This array is replaced during a strategic // merge patch. // +optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } @@ -1202,6 +1278,33 @@ const ( LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type FieldSelectorRequirement struct { + // key is the field selector key that the requirement applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"` + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A field selector operator is the set of operators that can be used in a selector requirement. +type FieldSelectorOperator string + +const ( + FieldSelectorOpIn FieldSelectorOperator = "In" + FieldSelectorOpNotIn FieldSelectorOperator = "NotIn" + FieldSelectorOpExists FieldSelectorOperator = "Exists" + FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist" +) + // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource // that the fieldset applies to. type ManagedFieldsEntry struct { @@ -1215,7 +1318,11 @@ type ManagedFieldsEntry struct { // APIVersion field. It is necessary to track the version of a field // set because it cannot be automatically converted. APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` - // Time is timestamp of when these fields were set. 
It should always be empty if Operation is 'Apply' + // Time is the timestamp of when the ManagedFields entry was added. The + // timestamp will also be updated if a field is added, the manager + // changes any of the owned fields value or removes a field. The + // timestamp does not update when a field is removed from the entry + // because another manager took it over. // +optional Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` @@ -1286,8 +1393,10 @@ type Table struct { // columnDefinitions describes each column in the returned items array. The number of cells per row // will always match the number of column definitions. + // +listType=atomic ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` // rows is the list of items in the table. + // +listType=atomic Rows []TableRow `json:"rows"` } @@ -1320,12 +1429,14 @@ type TableRow struct { // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a // more detailed description. + // +listType=atomic Cells []interface{} `json:"cells"` // conditions describe additional status of a row that are relevant for a human user. These conditions // apply to the row, not to the object, and will be specific to table output. The only defined // condition type is 'Completed', for a row that indicates a resource that has run to completion and // can be given less visual priority. // +optional + // +listType=atomic Conditions []TableRowCondition `json:"conditions,omitempty"` // This field contains the requested additional information about each object based on the includeObject // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 088ff01f0..1fa37215c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_APIGroup = map[string]string{ @@ -115,7 +115,7 @@ var map_CreateOptions = map[string]string{ "": "CreateOptions may be provided when creating an API object.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. 
Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -135,6 +135,17 @@ func (DeleteOptions) SwaggerDoc() map[string]string { return map_DeleteOptions } +var map_FieldSelectorRequirement = map[string]string{ + "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the field selector key that the requirement applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", +} + +func (FieldSelectorRequirement) SwaggerDoc() map[string]string { + return map_FieldSelectorRequirement +} + var map_FieldsV1 = map[string]string{ "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } @@ -195,7 +206,7 @@ func (List) SwaggerDoc() map[string]string { var map_ListMeta = map[string]string{ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. 
Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "selfLink": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", @@ -216,6 +227,7 @@ var map_ListOptions = map[string]string{ "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "sendInitialEvents": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -227,7 +239,7 @@ var map_ManagedFieldsEntry = map[string]string{ "manager": "Manager is an identifier of the workflow managing these fields.", "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", - "time": "Time is timestamp of when these fields were set. 
It should always be empty if Operation is 'Apply'", + "time": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.", "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", "subresource": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.", @@ -239,21 +251,20 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", + "selfLink": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", - "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } @@ -265,10 +276,10 @@ var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "controller": "If true, this reference points to the managing controller.", - "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", } func (OwnerReference) SwaggerDoc() map[string]string { @@ -307,7 +318,7 @@ var map_PatchOptions = map[string]string{ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (PatchOptions) SwaggerDoc() map[string]string { @@ -373,7 +384,7 @@ var map_StatusDetails = map[string]string{ "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", } @@ -452,7 +463,7 @@ var map_UpdateOptions = map[string]string{ "": "UpdateOptions may be provided when updating an API object. 
All fields in UpdateOptions should also be present in PatchOptions.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation determines how the server should respond to unknown/duplicate fields in the object in the request. Introduced as alpha in 1.23, older servers or servers with the `ServerSideFieldValidation` feature disabled will discard valid values specified in this param and not perform any server side field validation. Valid values are: - Ignore: ignores unknown/duplicate fields. - Warn: responds with a warning for each unknown/duplicate field, but successfully serves the request. - Strict: fails the request on unknown/duplicate fields.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index d26c6cff4..0f58d66c0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -173,7 +173,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) } } return strMap, true, nil @@ -340,6 +340,7 @@ func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, if len(gvk.Kind) == 0 { return nil, &gvk, runtime.NewMissingKindErr(string(data)) } + // TODO(109023): require apiVersion here as well return obj, &gvk, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index d1903394d..40d289f37 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -101,6 +101,11 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { return nil } +func (obj *Unstructured) EachListItemWithAlloc(fn func(runtime.Object) error) error { + // EachListItem has allocated a new Object for the user, we can use it directly. + return obj.EachListItem(fn) +} + func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { return make(map[string]interface{}) @@ -444,18 +449,6 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { u.setNestedStringSlice(finalizers, "metadata", "finalizers") } -func (u *Unstructured) GetClusterName() string { - return getNestedString(u.Object, "metadata", "clusterName") -} - -func (u *Unstructured) SetClusterName(clusterName string) { - if len(clusterName) == 0 { - RemoveNestedField(u.Object, "metadata", "clusterName") - return - } - u.setNestedField(clusterName, "metadata", "clusterName") -} - func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { items, found, err := NestedSlice(u.Object, "metadata", "managedFields") if !found || err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index 5028f5fb5..82beda2a2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,6 +52,15 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } +func (u *UnstructuredList) EachListItemWithAlloc(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&Unstructured{Object: u.Items[i].Object}); err != nil { + return err + } + } + return nil +} + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. 
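For illustration only (not part of this diff), the EachListItemWithAlloc methods added above could be consumed as in the sketch below; the list contents are made up. Each callback receives its own *Unstructured wrapper rather than a pointer into the backing Items slice, which is the distinction from ranging over Items directly.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	list := &unstructured.UnstructuredList{
		Items: []unstructured.Unstructured{
			{Object: map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "metadata": map[string]interface{}{"name": "a"}}},
			{Object: map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "metadata": map[string]interface{}{"name": "b"}}},
		},
	}
	var kept []runtime.Object
	// Each invocation gets a freshly allocated wrapper, not a pointer into list.Items.
	if err := list.EachListItemWithAlloc(func(obj runtime.Object) error {
		kept = append(kept, obj)
		return nil
	}); err != nil {
		panic(err)
	}
	fmt.Println(len(kept)) // 2
}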
// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go new file mode 100644 index 000000000..3eba5ba54 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -0,0 +1,359 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "regexp" + "unicode" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options +type LabelSelectorValidationOptions struct { + // Allow invalid label value in selector + AllowInvalidLabelValueInSelector bool + + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool +} + +// LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. +// This is useful for determining whether AllowInvalidLabelValueInSelector should be set to true when validating an update +// based on existing persisted invalid values. +func LabelSelectorHasInvalidLabelValue(ps *metav1.LabelSelector) bool { + if ps == nil { + return false + } + for _, e := range ps.MatchExpressions { + for _, v := range e.Values { + if len(validation.IsValidLabelValue(v)) > 0 { + return true + } + } + } + return false +} + +// ValidateLabelSelector validate the LabelSelector according to the opts and returns any validation errors. +// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. +func ValidateLabelSelector(ps *metav1.LabelSelector, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ps == nil { + return allErrs + } + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + for i, expr := range ps.MatchExpressions { + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, opts, fldPath.Child("matchExpressions").Index(i))...) + } + return allErrs +} + +// ValidateLabelSelectorRequirement validate the requirement according to the opts and returns any validation errors. +// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. 
+func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch sr.Operator { + case metav1.LabelSelectorOpIn, metav1.LabelSelectorOpNotIn: + if len(sr.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.LabelSelectorOpExists, metav1.LabelSelectorOpDoesNotExist: + if len(sr.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } + } + allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + if !opts.AllowInvalidLabelValueInSelector { + for valueIndex, value := range sr.Values { + for _, msg := range validation.IsValidLabelValue(value) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(valueIndex), value, msg)) + } + } + } + return allErrs +} + +// ValidateLabelName validates that the label name is correctly defined. +func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(labelName) { + allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) + } + return allErrs +} + +// ValidateLabels validates that a set of labels are correctly defined. +func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, v := range labels { + allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) + for _, msg := range validation.IsValidLabelValue(v) { + allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) + } + } + return allErrs +} + +// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validate options +type FieldSelectorValidationOptions struct { + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool +} + +// ValidateLabelSelectorRequirement validates the requirement according to the opts and returns any validation errors. 
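The validation package is newly vendored here (the whole file is added above). As a rough sketch, not part of the diff and with an invented selector literal, a caller could exercise ValidateLabelSelector like this:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	sel := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "opa"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			// "In" requires at least one value, so this requirement should be rejected.
			{Key: "tier", Operator: metav1.LabelSelectorOpIn},
		},
	}
	opts := metav1validation.LabelSelectorValidationOptions{}
	for _, err := range metav1validation.ValidateLabelSelector(sel, opts, field.NewPath("selector")) {
		fmt.Println(err) // e.g. selector.matchExpressions[0].values: Required value: ...
	}
}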
+func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(requirement.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified")) + } + + switch requirement.Operator { + case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn: + if len(requirement.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist: + if len(requirement.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator")) + } + } + + return allErrs +} + +func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { + allErrs := field.ErrorList{} + //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed + if options.OrphanDependents != nil && options.PropagationPolicy != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot be both set")) + } + if options.PropagationPolicy != nil && + *options.PropagationPolicy != metav1.DeletePropagationForeground && + *options.PropagationPolicy != metav1.DeletePropagationBackground && + *options.PropagationPolicy != metav1.DeletePropagationOrphan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) + } + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + return allErrs +} + +func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) + return allErrs +} + +func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) + return allErrs +} + +func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { + allErrs := field.ErrorList{} + if patchType != types.ApplyPatchType { + if options.Force != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) + } + } else { + if options.FieldManager == "" { + // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. 
+ allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) + } + } + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateFieldValidation(field.NewPath("fieldValidation"), options.FieldValidation)...) + return allErrs +} + +var FieldManagerMaxLength = 128 + +// ValidateFieldManager valides that the fieldManager is the proper length and +// only has printable characters. +func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // the field can not be set as a `*string`, so a empty string ("") is + // considered as not set and is defaulted by the rest of the process + // (unless apply is used, in which case it is required). + if len(fieldManager) > FieldManagerMaxLength { + allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) + } + // Verify that all characters are printable. + for i, r := range fieldManager { + if !unicode.IsPrint(r) { + allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) + } + } + + return allErrs +} + +var allowedDryRunValues = sets.NewString(metav1.DryRunAll) + +// ValidateDryRun validates that a dryRun query param only contains allowed values. +func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedDryRunValues.HasAll(dryRun...) { + allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) + } + return allErrs +} + +var allowedFieldValidationValues = sets.NewString("", metav1.FieldValidationIgnore, metav1.FieldValidationWarn, metav1.FieldValidationStrict) + +// ValidateFieldValidation validates that a fieldValidation query param only contains allowed values. +func ValidateFieldValidation(fldPath *field.Path, fieldValidation string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedFieldValidationValues.Has(fieldValidation) { + allErrs = append(allErrs, field.NotSupported(fldPath, fieldValidation, allowedFieldValidationValues.List())) + } + return allErrs + +} + +const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` + +// ValidateTableOptions returns any invalid flags on TableOptions. 
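FieldSelectorRequirement (added to types.go earlier in this diff) pairs with the ValidateFieldSelectorRequirement helper above. A hedged sketch of wiring the two together, with an illustrative key and value that are not taken from the diff:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	req := metav1.FieldSelectorRequirement{
		Key:      "status.phase", // illustrative field selector key
		Operator: metav1.FieldSelectorOpIn,
		Values:   []string{"Running"},
	}
	errs := metav1validation.ValidateFieldSelectorRequirement(req, metav1validation.FieldSelectorValidationOptions{}, field.NewPath("fieldSelector"))
	fmt.Println(len(errs)) // 0: the key is set and In has a non-empty values list
}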
+func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { + var allErrs field.ErrorList + switch opts.IncludeObject { + case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": + default: + allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) + } + return allErrs +} + +const MaxSubresourceNameLength = 256 + +func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for i, fields := range fieldsList { + fldPath := fldPath.Index(i) + switch fields.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`")) + } + if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`")) + } + allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...) + + if len(fields.Subresource) > MaxSubresourceNameLength { + allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength)) + } + } + return allErrs +} + +func ValidateConditions(conditions []metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + conditionTypeToFirstIndex := map[string]int{} + for i, condition := range conditions { + if _, ok := conditionTypeToFirstIndex[condition.Type]; ok { + allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("type"), condition.Type)) + } else { + conditionTypeToFirstIndex[condition.Type] = i + } + + allErrs = append(allErrs, ValidateCondition(condition, fldPath.Index(i))...) + } + + return allErrs +} + +// validConditionStatuses is used internally to check validity and provide a good message +var validConditionStatuses = sets.NewString(string(metav1.ConditionTrue), string(metav1.ConditionFalse), string(metav1.ConditionUnknown)) + +const ( + maxReasonLen = 1 * 1024 + maxMessageLen = 32 * 1024 +) + +func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // type is set and is a valid format + allErrs = append(allErrs, ValidateLabelName(condition.Type, fldPath.Child("type"))...) 
+ + // status is set and is an accepted value + if !validConditionStatuses.Has(string(condition.Status)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("status"), condition.Status, validConditionStatuses.List())) + } + + if condition.ObservedGeneration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("observedGeneration"), condition.ObservedGeneration, "must be greater than or equal to zero")) + } + + if condition.LastTransitionTime.IsZero() { + allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set")) + } + + if len(condition.Reason) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set")) + } else { + for _, currErr := range isValidConditionReason(condition.Reason) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) + } + if len(condition.Reason) > maxReasonLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) + } + } + + if len(condition.Message) > maxMessageLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) + } + + return allErrs +} + +const conditionReasonFmt string = "[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?" +const conditionReasonErrMsg string = "a condition reason must start with alphabetic character, optionally followed by a string of alphanumeric characters or '_,:', and must end with an alphanumeric character or '_'" + +var conditionReasonRegexp = regexp.MustCompile("^" + conditionReasonFmt + "$") + +// isValidConditionReason tests for a string that conforms to rules for condition reasons. This checks the format, but not the length. +func isValidConditionReason(value string) []string { + if !conditionReasonRegexp.MatchString(value) { + return []string{validation.RegexError(conditionReasonErrMsg, conditionReasonFmt, "my_name", "MY_NAME", "MyName", "ReasonA,ReasonB", "ReasonA:ReasonB")} + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index b7590f0b3..afe01ed5a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -426,6 +426,13 @@ func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, } else { out.Continue = "" } + if values, ok := map[string][]string(*in)["sendInitialEvents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.SendInitialEvents, s); err != nil { + return err + } + } else { + out.SendInitialEvents = nil + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 418e6099f..90cc54a7e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -327,6 +327,27 @@ func (in *Duration) DeepCopy() *Duration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
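The sendInitialEvents plumbing (the query-parameter conversion here, plus the field documentation in the swagger strings earlier in this diff) supports the streaming "watch list" pattern. As a sketch, outside the diff, of the ListOptions shape a client would send, assuming the documented requirement that it be combined with watch=true and resourceVersionMatch=NotOlderThan:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sendInitialEvents := true
	opts := metav1.ListOptions{
		Watch:               true,
		AllowWatchBookmarks: true,
		// With an empty resourceVersion this asks for a consistent read of the current
		// state, followed by a synthetic Bookmark event, then ordinary watch events.
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		SendInitialEvents:    &sendInitialEvents,
	}
	fmt.Printf("%+v\n", opts)
}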
+func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement. +func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement { + if in == nil { + return nil + } + out := new(FieldSelectorRequirement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in @@ -602,6 +623,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index a5a949679..819d936fe 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +// source: k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_90ec10f86b91f9a8, []int{0} + return fileDescriptor_39237a8d8061b52f, []int{0} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,31 +77,30 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) + proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_39237a8d8061b52f) } -var fileDescriptor_90ec10f86b91f9a8 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30, - 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0, - 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xbf, 0x5d, 0xac, - 0x69, 0x4a, 0xf2, 0xef, 0xc0, 0x9b, 0x1f, 0xc1, 0x8f, 0xb5, 0xe3, 0x8e, 0x03, 0x61, 0xb8, 0xf8, - 0x45, 0x24, 0x5d, 0x15, 0x19, 0x0a, 0xbb, 0xf5, 0x79, 0xca, 0xef, 0x97, 0x27, 0x24, 0x1c, 0xa7, - 0x67, 0x96, 0x49, 0xcd, 0xd3, 0x22, 0x02, 0x93, 0x01, 0x82, 0xe5, 0x33, 0xc8, 0x26, 0xda, 0xf0, - 0xea, 0x87, 0xc8, 0xa5, 0x12, 0xf1, 0x54, 0x66, 0x60, 0x9e, 0x79, 0x9e, 0x26, 0xbe, 0xb0, 0x5c, - 0x01, 0x0a, 0x3e, 0x1b, 0x44, 0x80, 0x62, 0xc0, 0x13, 0xc8, 0xc0, 0x08, 0x84, 0x09, 0xcb, 0x8d, - 0x46, 0xdd, 0x3c, 0xde, 0xa0, 0xec, 0x27, 0xca, 0xf2, 0x34, 0xf1, 0x85, 0x65, 0x1e, 0x65, 0x15, - 0xda, 0xee, 0x27, 0x12, 0xa7, 0x45, 0xc4, 0x62, 0xad, 0x78, 0xa2, 0x13, 0xcd, 0x4b, 0x43, 0x54, - 0x3c, 0x94, 0xa9, 0x0c, 0xe5, 0xd7, 0xc6, 0xdc, 0x3e, 0xd9, 0x65, 0xd4, 0xf6, 0x9e, 0xf6, 0xe9, - 0x5f, 0x94, 0x29, 
0x32, 0x94, 0x0a, 0xb8, 0x8d, 0xa7, 0xa0, 0xc4, 0x36, 0x77, 0xf4, 0x46, 0xc2, - 0xc3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0xeb, 0xe8, 0x11, 0x62, 0xbc, 0x02, 0x14, 0x13, 0x81, 0xe2, - 0x52, 0x5a, 0x6c, 0xde, 0x85, 0x75, 0x55, 0xe5, 0xd6, 0xbf, 0x2e, 0xe9, 0x35, 0x86, 0x8c, 0xed, - 0x72, 0x71, 0xe6, 0x69, 0x6f, 0x1a, 0x1d, 0xcc, 0x57, 0x9d, 0xc0, 0xad, 0x3a, 0xf5, 0xaf, 0x66, - 0xfc, 0x6d, 0x6c, 0xde, 0x87, 0x35, 0x89, 0xa0, 0x6c, 0x8b, 0x74, 0xff, 0xf7, 0x1a, 0xc3, 0xf3, - 0xdd, 0xd4, 0xbf, 0xae, 0x1d, 0xed, 0x57, 0xe7, 0xd4, 0x2e, 0xbc, 0x71, 0xbc, 0x11, 0x8f, 0xfa, - 0xf3, 0x35, 0x0d, 0x16, 0x6b, 0x1a, 0x2c, 0xd7, 0x34, 0x78, 0x71, 0x94, 0xcc, 0x1d, 0x25, 0x0b, - 0x47, 0xc9, 0xd2, 0x51, 0xf2, 0xee, 0x28, 0x79, 0xfd, 0xa0, 0xc1, 0xed, 0x5e, 0xf5, 0x52, 0x9f, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x82, 0x5b, 0x80, 0x29, 0x02, 0x00, 0x00, +var fileDescriptor_39237a8d8061b52f = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0xf3, 0x30, + 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x30, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xdb, 0xc1, + 0x84, 0x0d, 0x11, 0xc5, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x31, 0x8b, 0x35, + 0x4d, 0x69, 0xfe, 0x15, 0xbc, 0xf9, 0x11, 0xfc, 0x58, 0x3d, 0xee, 0x38, 0x10, 0x86, 0x8d, 0x5f, + 0x44, 0xd2, 0x56, 0x91, 0xa1, 0xd0, 0x5b, 0x9e, 0x07, 0x7e, 0xbf, 0x3c, 0x81, 0xf8, 0x67, 0xd1, + 0xa9, 0x21, 0x52, 0x53, 0x96, 0x48, 0xc5, 0xc2, 0x95, 0x8c, 0x79, 0xfa, 0x4c, 0x93, 0x48, 0xb8, + 0xc2, 0x50, 0xc5, 0x81, 0xd1, 0xa7, 0x49, 0xc0, 0x81, 0x4d, 0xa8, 0xe0, 0x31, 0x4f, 0x19, 0xf0, + 0x25, 0x49, 0x52, 0x0d, 0xba, 0x3b, 0xae, 0x50, 0xf2, 0x13, 0x25, 0x49, 0x24, 0x5c, 0x61, 0x88, + 0x43, 0x49, 0x8d, 0xf6, 0x8f, 0x84, 0x84, 0x55, 0x16, 0x90, 0x50, 0x2b, 0x2a, 0xb4, 0xd0, 0xb4, + 0x34, 0x04, 0xd9, 0x7d, 0x99, 0xca, 0x50, 0x9e, 0x2a, 0x73, 0xff, 0xb8, 0xc9, 0xa8, 0xdd, 0x3d, + 0xfd, 0x93, 0xbf, 0xa8, 0x34, 0x8b, 0x41, 0x2a, 0x4e, 0x4d, 0xb8, 0xe2, 0x8a, 0xed, 0x72, 0x87, + 0x6f, 0xc8, 0x3f, 0xb8, 0x66, 0x29, 0x48, 0xf6, 0x38, 0x0f, 0x1e, 0x78, 0x08, 0x57, 0x1c, 0xd8, + 0x92, 0x01, 0xbb, 0x94, 0x06, 0xba, 0xb7, 0x7e, 0x5b, 0xd5, 0xb9, 0xf7, 0x6f, 0x88, 0x46, 0x9d, + 0x29, 0x21, 0x4d, 0x1e, 0x4e, 0x1c, 0xed, 0x4c, 0xb3, 0xfd, 0x7c, 0x3b, 0xf0, 0xec, 0x76, 0xd0, + 0xfe, 0x6a, 0x16, 0xdf, 0xc6, 0xee, 0x9d, 0xdf, 0x92, 0xc0, 0x95, 0xe9, 0xa1, 0xe1, 0xff, 0x51, + 0x67, 0x7a, 0xde, 0x4c, 0xfd, 0xeb, 0xda, 0xd9, 0x5e, 0x7d, 0x4f, 0xeb, 0xc2, 0x19, 0x17, 0x95, + 0x78, 0x36, 0xcf, 0x0b, 0xec, 0xad, 0x0b, 0xec, 0x6d, 0x0a, 0xec, 0xbd, 0x58, 0x8c, 0x72, 0x8b, + 0xd1, 0xda, 0x62, 0xb4, 0xb1, 0x18, 0xbd, 0x5b, 0x8c, 0x5e, 0x3f, 0xb0, 0x77, 0x33, 0x6e, 0xfc, + 0x0d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x0f, 0xd7, 0x36, 0x32, 0x02, 0x00, 0x00, } func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index a209dd456..fcec55354 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -25,7 +25,7 @@ import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". 
-option go_package = "v1beta1"; +option go_package = "k8s.io/apimachinery/pkg/apis/meta/v1beta1"; // PartialObjectMetadataList contains a list of objects containing only their metadata. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -33,9 +33,9 @@ message PartialObjectMetadataList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; // items contains each of the included items. - repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index ef7e7c1e9..dff735dcf 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadataList = map[string]string{ diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index ed51a25e3..76b76247c 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -115,10 +115,10 @@ type ConversionFuncs struct { // previously defined functions. 
func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error { tA, tB := reflect.TypeOf(a), reflect.TypeOf(b) - if tA.Kind() != reflect.Ptr { + if tA.Kind() != reflect.Pointer { return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", a) } - if tB.Kind() != reflect.Ptr { + if tB.Kind() != reflect.Pointer { return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", b) } c.untyped[typePair{tA, tB}] = fn @@ -179,10 +179,10 @@ func (c *Converter) RegisterGeneratedUntypedConversionFunc(a, b interface{}, fn func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { typeFrom := reflect.TypeOf(from) typeTo := reflect.TypeOf(to) - if reflect.TypeOf(from).Kind() != reflect.Ptr { + if typeFrom.Kind() != reflect.Pointer { return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom) } - if typeTo.Kind() != reflect.Ptr { + if typeTo.Kind() != reflect.Pointer { return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) } c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go index f21abe1e5..25b2923f2 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go @@ -34,3 +34,14 @@ func EqualitiesOrDie(funcs ...interface{}) Equalities { } return e } + +// Performs a shallow copy of the equalities map +func (e Equalities) Copy() Equalities { + result := Equalities{reflect.Equalities{}} + + for key, value := range e.Equalities { + result.Equalities[key] = value + } + + return result +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go index 4ebc1ebc5..7fadd27a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go @@ -26,7 +26,7 @@ import ( // Returns an error if this is not possible. func EnforcePtr(obj interface{}) (reflect.Value, error) { v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { + if v.Kind() != reflect.Pointer { if v.Kind() == reflect.Invalid { return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index 2f0dd0074..b0a9246d9 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -55,7 +55,7 @@ func jsonTag(field reflect.StructField) (string, bool) { } func isPointerKind(kind reflect.Kind) bool { - return kind == reflect.Ptr + return kind == reflect.Pointer } func isStructKind(kind reflect.Kind) bool { @@ -139,7 +139,7 @@ func Convert(obj interface{}) (url.Values, error) { } var sv reflect.Value switch reflect.TypeOf(obj).Kind() { - case reflect.Ptr, reflect.Interface: + case reflect.Pointer, reflect.Interface: sv = reflect.ValueOf(obj).Elem() default: return nil, fmt.Errorf("expecting a pointer or interface") diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 8360d842b..19d823cef 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -77,6 +77,8 @@ func (ls Set) AsValidatedSelector() (Selector, error) { // perform any validation. 
// According to our measurements this is significantly faster // in codepaths that matter at high scale. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func (ls Set) AsSelectorPreValidated() Selector { return SelectorFromValidatedSet(ls) } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 4c0f25931..9e22a0056 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -22,12 +22,12 @@ import ( "strconv" "strings" - "github.com/google/go-cmp/cmp" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/klog/v2" + stringslices "k8s.io/utils/strings/slices" ) var ( @@ -45,6 +45,19 @@ var ( // Requirements is AND of all requirements. type Requirements []Requirement +func (r Requirements) String() string { + var sb strings.Builder + + for i, requirement := range r { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(requirement.String()) + } + + return sb.String() +} + // Selector represents a label selector. type Selector interface { // Matches returns true if this selector matches the given set of labels. @@ -74,9 +87,12 @@ type Selector interface { RequiresExactMatch(label string) (value string, found bool) } +// Sharing this saves 1 alloc per use; this is safe because it's immutable. +var sharedEverythingSelector Selector = internalSelector{} + // Everything returns a selector that matches all labels. func Everything() Selector { - return internalSelector{} + return sharedEverythingSelector } type nothingSelector struct{} @@ -91,9 +107,12 @@ func (n nothingSelector) RequiresExactMatch(label string) (value string, found b return "", false } +// Sharing this saves 1 alloc per use; this is safe because it's immutable. +var sharedNothingSelector Selector = nothingSelector{} + // Nothing returns a selector that matches no labels func Nothing() Selector { - return nothingSelector{} + return sharedNothingSelector } // NewSelector returns a nil selector @@ -143,14 +162,12 @@ type Requirement struct { // NewRequirement is the constructor for a Requirement. // If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. -// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// -// of characters. See validateLabelKey for more details. +// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. +// 2. If the operator is In or NotIn, the values set must be non-empty. +// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// 4. If the operator is Exists or DoesNotExist, the value set must be empty. +// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// 6. The key is invalid due to its length, or sequence of characters. 
See validateLabelKey for more details. // // The empty string is a valid value in the input values set. // Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList @@ -207,22 +224,15 @@ func (r *Requirement) hasValue(value string) bool { // Matches returns true if the Requirement matches the input Labels. // There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// -// value for that key is in Requirement's value set. -// -// (3) The operator is NotIn, Labels has the Requirement's key and -// -// Labels' value for that key is not in Requirement's value set. -// -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// -// Requirement's key. -// -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// -// the Requirement's key and the corresponding value satisfies mathematical inequality. +// 1. The operator is Exists and Labels has the Requirement's key. +// 2. The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// 3. The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// 4. The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. func (r *Requirement) Matches(ls Labels) bool { switch r.operator { case selection.In, selection.Equals, selection.DoubleEquals: @@ -288,6 +298,13 @@ func (r *Requirement) Values() sets.String { return ret } +// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting. +func (r *Requirement) ValuesUnsorted() []string { + ret := make([]string, 0, len(r.strValues)) + ret = append(ret, r.strValues...) + return ret +} + // Equal checks the equality of requirement. func (r Requirement) Equal(x Requirement) bool { if r.key != x.key { @@ -296,7 +313,7 @@ func (r Requirement) Equal(x Requirement) bool { if r.operator != x.operator { return false } - return cmp.Equal(r.strValues, x.strValues) + return stringslices.Equal(r.strValues, x.strValues) } // Empty returns true if the internalSelector doesn't restrict selection space @@ -866,15 +883,14 @@ func (p *Parser) parseExactValue() (sets.String, error) { // "x in (foo,,baz),y,z notin ()" // // Note: -// -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. +// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// 2. Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// 3. The empty string is a valid VALUE +// 4. A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// 5. A requirement with just !KEY requires that the KEY not exist. 
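The selector grammar and Requirement rules documented above are easier to see in a short sketch (not part of the diff): the selector strings and label values below are illustrative, while labels.Parse, labels.Set, Matches, and the ValidatedSetSelector type added further down in this file are the real apimachinery APIs.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// "app in (web, api)" requires the key with one of the listed values,
	// a bare "tier" only requires that the key exists, and "env notin (dev)"
	// matches when the key is absent or holds any other value.
	sel, err := labels.Parse("app in (web, api),tier,env notin (dev)")
	if err != nil {
		panic(err)
	}

	fmt.Println(sel.Matches(labels.Set{"app": "web", "tier": "frontend", "env": "prod"})) // true
	fmt.Println(sel.Matches(labels.Set{"app": "db", "tier": "frontend"}))                 // false: "db" not in (web, api)
	fmt.Println(sel.Matches(labels.Set{"app": "web", "env": "prod"}))                     // false: key "tier" is missing

	// ValidatedSetSelector (added further down in this hunk) wraps an
	// already-validated, immutable Set without copying it; every key/value
	// pair in the selector must match exactly.
	exact := labels.ValidatedSetSelector{"app": "web"}
	fmt.Println(exact.Matches(labels.Set{"app": "web", "env": "prod"})) // true
}
```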
func Parse(selector string, opts ...field.PathOption) (Selector, error) { parsedSelector, err := parse(selector, field.ToPath(opts...)) if err == nil { @@ -942,6 +958,8 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. // A nil and empty Sets are considered equivalent to Everything(). // It assumes that Set is already validated and doesn't do any validation. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} @@ -963,3 +981,76 @@ func SelectorFromValidatedSet(ls Set) Selector { func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) { return parse(selector, field.ToPath(opts...)) } + +// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike +// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying +// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered +// equivalent to Everything(). +// +// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these +// constraints are not met, Set.AsValidatedSelector should be preferred +// +// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to +// the less optimized version. +type ValidatedSetSelector Set + +func (s ValidatedSetSelector) Matches(labels Labels) bool { + for k, v := range s { + if !labels.Has(k) || v != labels.Get(k) { + return false + } + } + return true +} + +func (s ValidatedSetSelector) Empty() bool { + return len(s) == 0 +} + +func (s ValidatedSetSelector) String() string { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + // Ensure deterministic output + sort.Strings(keys) + b := strings.Builder{} + for i, key := range keys { + v := s[key] + b.Grow(len(key) + 2 + len(v)) + if i != 0 { + b.WriteString(",") + } + b.WriteString(key) + b.WriteString("=") + b.WriteString(v) + } + return b.String() +} + +func (s ValidatedSetSelector) Add(r ...Requirement) Selector { + return s.toFullSelector().Add(r...) +} + +func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) { + return s.toFullSelector().Requirements() +} + +func (s ValidatedSetSelector) DeepCopySelector() Selector { + res := make(ValidatedSetSelector, len(s)) + for k, v := range s { + res[k] = v + } + return res +} + +func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) { + v, f := s[label] + return v, f +} + +func (s ValidatedSetSelector) toFullSelector() Selector { + return SelectorFromValidatedSet(Set(s)) +} + +var _ Selector = ValidatedSetSelector{} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/allocator.go b/vendor/k8s.io/apimachinery/pkg/runtime/allocator.go new file mode 100644 index 000000000..8bf22ae8a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/allocator.go @@ -0,0 +1,76 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "sync" +) + +// AllocatorPool simply stores Allocator objects to avoid additional memory allocations +// by caching created but unused items for later reuse, relieving pressure on the garbage collector. +// +// Usage: +// +// memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator) +// defer runtime.AllocatorPool.Put(memoryAllocator) +// +// A note for future: +// +// consider introducing multiple pools for storing buffers of different sizes +// perhaps this could allow us to be more efficient. +var AllocatorPool = sync.Pool{ + New: func() interface{} { + return &Allocator{} + }, +} + +// Allocator knows how to allocate memory +// It exists to make the cost of object serialization cheaper. +// In some cases, it allows for allocating memory only once and then reusing it. +// This approach puts less load on GC and leads to less fragmented memory in general. +type Allocator struct { + buf []byte +} + +var _ MemoryAllocator = &Allocator{} + +// Allocate reserves memory for n bytes only if the underlying array doesn't have enough capacity +// otherwise it returns previously allocated block of memory. +// +// Note that the returned array is not zeroed, it is the caller's +// responsibility to clean the memory if needed. +func (a *Allocator) Allocate(n uint64) []byte { + if uint64(cap(a.buf)) >= n { + a.buf = a.buf[:n] + return a.buf + } + // grow the buffer + size := uint64(2*cap(a.buf)) + n + a.buf = make([]byte, size) + a.buf = a.buf[:n] + return a.buf +} + +// SimpleAllocator a wrapper around make([]byte) +// conforms to the MemoryAllocator interface +type SimpleAllocator struct{} + +var _ MemoryAllocator = &SimpleAllocator{} + +func (sa *SimpleAllocator) Allocate(n uint64) []byte { + return make([]byte, n) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 7fc513dd0..73f85286c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -45,7 +45,6 @@ func NewCodec(e Encoder, d Decoder) Codec { // Encode is a convenience wrapper for encoding to a []byte from an Encoder func Encode(e Encoder, obj Object) ([]byte, error) { - // TODO: reuse buffer buf := &bytes.Buffer{} if err := e.Encode(obj, buf); err != nil { return nil, err diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go index 000228061..e88400776 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -30,7 +30,7 @@ import ( // TODO: verify that the correct external version is chosen on encode... 
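A minimal sketch (not part of the diff) of how the pooled Allocator introduced in allocator.go above behaves; the buffer sizes are arbitrary.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Borrow an Allocator from the shared pool and return it when done,
	// mirroring the usage shown in the AllocatorPool doc comment.
	alloc := runtime.AllocatorPool.Get().(*runtime.Allocator)
	defer runtime.AllocatorPool.Put(alloc)

	buf1 := alloc.Allocate(1024) // grows the internal buffer on first use
	buf2 := alloc.Allocate(512)  // served from the same backing array, no new allocation

	fmt.Println(len(buf1), len(buf2)) // 1024 512
	// Note: buf1 and buf2 alias the same memory and Allocate does not zero it,
	// so callers must fully overwrite (or clear) the returned slice.
}
```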
func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { if _, err := Encode(c, internalType); err != nil { - return fmt.Errorf("Internal type not encodable: %v", err) + return fmt.Errorf("internal type not encodable: %v", err) } for _, et := range externalTypes { typeMeta := TypeMeta{ diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index b99492a89..62eb27afc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -231,13 +231,13 @@ func (c *fromUnstructuredContext) pushKey(key string) { } -// FromUnstructuredWIthValidation converts an object from map[string]interface{} representation into a concrete type. +// FromUnstructuredWithValidation converts an object from map[string]interface{} representation into a concrete type. // It uses encoding/json/Unmarshaler if object implements it or reflection if not. // It takes a validationDirective that indicates how to behave when it encounters unknown fields. func (c *unstructuredConverter) FromUnstructuredWithValidation(u map[string]interface{}, obj interface{}, returnUnknownFields bool) error { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) - if t.Kind() != reflect.Ptr || value.IsNil() { + if t.Kind() != reflect.Pointer || value.IsNil() { return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) } @@ -291,7 +291,7 @@ func fromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error st, dt := sv.Type(), dv.Type() switch dt.Kind() { - case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface: + case reflect.Map, reflect.Slice, reflect.Pointer, reflect.Struct, reflect.Interface: // Those require non-trivial conversion. default: // This should handle all simple types. 
@@ -353,7 +353,7 @@ func fromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error return mapFromUnstructured(sv, dv, ctx) case reflect.Slice: return sliceFromUnstructured(sv, dv, ctx) - case reflect.Ptr: + case reflect.Pointer: return pointerFromUnstructured(sv, dv, ctx) case reflect.Struct: return structFromUnstructured(sv, dv, ctx) @@ -465,7 +465,7 @@ func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) e } dv.SetBytes(data) } else { - dv.Set(reflect.Zero(dt)) + dv.Set(reflect.MakeSlice(dt, 0, 0)) } return nil } @@ -496,13 +496,13 @@ func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) e func pointerFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) error { st, dt := sv.Type(), dv.Type() - if st.Kind() == reflect.Ptr && sv.IsNil() { + if st.Kind() == reflect.Pointer && sv.IsNil() { dv.Set(reflect.Zero(dt)) return nil } dv.Set(reflect.New(dt.Elem())) switch st.Kind() { - case reflect.Ptr, reflect.Interface: + case reflect.Pointer, reflect.Interface: return fromUnstructured(sv.Elem(), dv.Elem(), ctx) default: return fromUnstructured(sv, dv.Elem(), ctx) @@ -579,7 +579,7 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte } else { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) - if t.Kind() != reflect.Ptr || value.IsNil() { + if t.Kind() != reflect.Pointer || value.IsNil() { return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) } u = map[string]interface{}{} @@ -686,7 +686,7 @@ func toUnstructured(sv, dv reflect.Value) error { return mapToUnstructured(sv, dv) case reflect.Slice: return sliceToUnstructured(sv, dv) - case reflect.Ptr: + case reflect.Pointer: return pointerToUnstructured(sv, dv) case reflect.Struct: return structToUnstructured(sv, dv) @@ -705,24 +705,13 @@ func mapToUnstructured(sv, dv reflect.Value) error { } if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { if st.Key().Kind() == reflect.String { - switch st.Elem().Kind() { - // TODO It should be possible to reuse the slice for primitive types. - // However, it is panicing in the following form. - // case reflect.String, reflect.Bool, - // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - // sv.Set(sv) - // return nil - default: - // We need to do a proper conversion. - } + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() } - dv.Set(reflect.MakeMap(mapStringInterfaceType)) - dv = dv.Elem() - dt = dv.Type() } if dt.Kind() != reflect.Map { - return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + return fmt.Errorf("cannot convert map to: %v", dt.Kind()) } if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { @@ -763,20 +752,9 @@ func sliceToUnstructured(sv, dv reflect.Value) error { return nil } if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - switch st.Elem().Kind() { - // TODO It should be possible to reuse the slice for primitive types. - // However, it is panicing in the following form. - // case reflect.String, reflect.Bool, - // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - // sv.Set(sv) - // return nil - default: - // We need to do a proper conversion. 
- dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap())) - dv = dv.Elem() - dt = dv.Type() - } + dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap())) + dv = dv.Elem() + dt = dv.Type() } if dt.Kind() != reflect.Slice { return fmt.Errorf("cannot convert slice to: %v", dt.Kind()) @@ -812,7 +790,7 @@ func isZero(v reflect.Value) bool { case reflect.Map, reflect.Slice: // TODO: It seems that 0-len maps are ignored in it. return v.IsNil() || v.Len() == 0 - case reflect.Ptr, reflect.Interface: + case reflect.Pointer, reflect.Interface: return v.IsNil() } return false diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 9056397fa..60c000bcb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -18,16 +18,77 @@ package runtime import ( "bytes" - "encoding/json" "errors" + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/util/json" ) +// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the +// signature of ToUnstructured does not allow returning an error value in cases where the conversion +// is not possible (content type is unrecognized or bytes don't match content type). +func rawToUnstructured(raw []byte, contentType string) (interface{}, error) { + switch contentType { + case ContentTypeJSON: + var u interface{} + if err := json.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err) + } + return u, nil + case ContentTypeCBOR: + var u interface{} + if err := cbor.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err) + } + return u, nil + default: + return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured") + } +} + +func (re RawExtension) guessContentType() string { + switch { + case bytes.HasPrefix(re.Raw, cborSelfDescribed): + return ContentTypeCBOR + case len(re.Raw) > 0: + switch re.Raw[0] { + case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null. + return ContentTypeJSON + } + } + return "" +} + func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - if !bytes.Equal(in, []byte("null")) { - re.Raw = append(re.Raw[0:0], in...) + if bytes.Equal(in, []byte("null")) { + return nil + } + re.Raw = append(re.Raw[0:0], in...) + return nil +} + +var ( + cborNull = []byte{0xf6} + cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7} +) + +func (re *RawExtension) UnmarshalCBOR(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(in, cborNull) { + if !bytes.HasPrefix(in, cborSelfDescribed) { + // The self-described CBOR tag doesn't change the interpretation of the data + // item it encloses, but it is useful as a magic number. Its encoding is + // also what is used to implement the CBOR RecognizingDecoder. + re.Raw = append(re.Raw[:0], cborSelfDescribed...) + } + re.Raw = append(re.Raw, in...) 
} return nil } @@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) { } return []byte("null"), nil } - // TODO: Check whether ContentType is actually JSON before returning it. - return re.Raw, nil + + contentType := re.guessContentType() + if contentType == ContentTypeJSON { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return json.Marshal(u) +} + +func (re RawExtension) MarshalCBOR() ([]byte, error) { + if re.Raw == nil { + if re.Object != nil { + return cbor.Marshal(re.Object) + } + return cbor.Marshal(nil) + } + + contentType := re.guessContentType() + if contentType == ContentTypeCBOR { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return cbor.Marshal(u) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index ac428d610..2e40e140a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// source: k8s.io/apimachinery/pkg/runtime/generated.proto package runtime @@ -45,7 +45,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *RawExtension) Reset() { *m = RawExtension{} } func (*RawExtension) ProtoMessage() {} func (*RawExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{0} + return fileDescriptor_2e0e4b920403a48c, []int{0} } func (m *RawExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,7 +73,7 @@ var xxx_messageInfo_RawExtension proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{1} + return fileDescriptor_2e0e4b920403a48c, []int{1} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -101,7 +101,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *Unknown) Reset() { *m = Unknown{} } func (*Unknown) ProtoMessage() {} func (*Unknown) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{2} + return fileDescriptor_2e0e4b920403a48c, []int{2} } func (m *Unknown) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,35 +133,34 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) -} - -var fileDescriptor_9d3c45d7f546725c = []byte{ - // 378 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, - 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, - 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, - 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, - 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, - 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, - 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, - 0x2a, 0x0b, 0x6e, 
0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, - 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, - 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, - 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, - 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, - 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, - 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, - 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, - 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, - 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, - 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, - 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, - 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, - 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, - 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, - 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_2e0e4b920403a48c) +} + +var fileDescriptor_2e0e4b920403a48c = []byte{ + // 365 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x6b, 0x22, 0x31, + 0x18, 0xc6, 0x27, 0x2a, 0xe8, 0x46, 0xc1, 0x25, 0x7b, 0xd8, 0xd9, 0x3d, 0x64, 0xc4, 0xd3, 0x7a, + 0xd8, 0x0c, 0x08, 0x85, 0x5e, 0x1d, 0xf1, 0x50, 0x4a, 0xa1, 0x84, 0xfe, 0x81, 0x9e, 0x1a, 0x67, + 0xd2, 0x31, 0x0c, 0x26, 0xc3, 0x18, 0x99, 0x7a, 0xeb, 0x47, 0xe8, 0xc7, 0xf2, 0xe8, 0xd1, 0x93, + 0xd4, 0xe9, 0x87, 0xe8, 0xb5, 0x18, 0xa3, 0xb5, 0xed, 0xc1, 0x5b, 0xde, 0xf7, 0x79, 0x7e, 0xcf, + 0xfb, 0xbe, 0x10, 0xe8, 0x27, 0xa7, 0x13, 0x22, 0x94, 0xcf, 0x52, 0x31, 0x66, 0xe1, 0x48, 0x48, + 0x9e, 0xcd, 0xfc, 0x34, 0x89, 0xfd, 0x6c, 0x2a, 0xb5, 0x18, 0x73, 0x3f, 0xe6, 0x92, 0x67, 0x4c, + 0xf3, 0x88, 0xa4, 0x99, 0xd2, 0x0a, 0x79, 0x5b, 0x80, 0x1c, 0x02, 0x24, 0x4d, 0x62, 0x62, 0x81, + 0xbf, 0xff, 0x63, 0xa1, 0x47, 0xd3, 0x21, 0x09, 0xd5, 0xd8, 0x8f, 0x55, 0xac, 0x7c, 0xc3, 0x0d, + 0xa7, 0x0f, 0xa6, 0x32, 0x85, 0x79, 0x6d, 0xf3, 0xda, 0x1d, 0xd8, 0xa0, 0x2c, 0x1f, 0x3c, 0x6a, + 0x2e, 0x27, 0x42, 0x49, 0xf4, 0x07, 0x96, 0x33, 0x96, 0xbb, 0xa0, 0x05, 0xfe, 0x35, 0x82, 0x6a, + 0xb1, 0xf2, 0xca, 0x94, 0xe5, 0x74, 0xd3, 0x6b, 0xdf, 0xc3, 0xda, 0xd5, 0x2c, 0xe5, 0x17, 0x5c, + 0x33, 0xd4, 0x85, 0x90, 0xa5, 0xe2, 0x86, 0x67, 0x1b, 0xc8, 0xb8, 0x7f, 0x04, 0x68, 0xbe, 0xf2, + 0x9c, 0x62, 0xe5, 0xc1, 0xde, 0xe5, 0x99, 0x55, 0xe8, 0x81, 0x0b, 0xb5, 0x60, 0x25, 0x11, 0x32, + 0x72, 0x4b, 0xc6, 0xdd, 0xb0, 0xee, 0xca, 0xb9, 0x90, 0x11, 0x35, 0x4a, 0xfb, 0x0d, 0xc0, 0xea, + 0xb5, 0x4c, 0xa4, 0xca, 0x25, 0xba, 0x85, 0x35, 0x6d, 0xa7, 0x99, 0xfc, 0x7a, 0xb7, 0x43, 0x8e, + 0xdc, 0x4e, 0x76, 0xeb, 0x05, 0x3f, 0x6d, 0xf8, 0x7e, 0x61, 0xba, 0x0f, 0xdb, 0x5d, 0x58, 0xfa, + 0x7e, 0x21, 0xea, 0xc1, 0x66, 0xa8, 0xa4, 0xe6, 0x52, 0x0f, 0x64, 0xa8, 
0x22, 0x21, 0x63, 0xb7, + 0x6c, 0x96, 0xfd, 0x6d, 0xf3, 0x9a, 0xfd, 0xcf, 0x32, 0xfd, 0xea, 0x47, 0x27, 0xb0, 0x6e, 0x5b, + 0x9b, 0xd1, 0x6e, 0xc5, 0xe0, 0xbf, 0x2c, 0x5e, 0xef, 0x7f, 0x48, 0xf4, 0xd0, 0x17, 0x0c, 0xe6, + 0x6b, 0xec, 0x2c, 0xd6, 0xd8, 0x59, 0xae, 0xb1, 0xf3, 0x54, 0x60, 0x30, 0x2f, 0x30, 0x58, 0x14, + 0x18, 0x2c, 0x0b, 0x0c, 0x5e, 0x0a, 0x0c, 0x9e, 0x5f, 0xb1, 0x73, 0xe7, 0x1d, 0xf9, 0x2d, 0xef, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x9b, 0x09, 0xb3, 0x4f, 0x02, 0x00, 0x00, } func (m *RawExtension) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 347105649..5f06cc574 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -22,7 +22,7 @@ syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime; // Package-wide variables from generator "generated". -option go_package = "runtime"; +option go_package = "k8s.io/apimachinery/pkg/runtime"; // RawExtension is used to hold extensions in external versions. // diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 7bd1a3a6a..cc0a77bba 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -236,10 +236,14 @@ func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { gvk = preferredGVK } } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err + + // The gvk only needs to be set if not already as desired. + if gvk != oldGVK { + kind.SetGroupVersionKind(gvk) + defer kind.SetGroupVersionKind(oldGVK) + } + + return e.Encoder.Encode(obj, stream) } // WithoutVersionDecoder clears the group version kind of a deserialized object. @@ -257,3 +261,26 @@ func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersion } return obj, gvk, err } + +type encoderWithAllocator struct { + encoder EncoderWithAllocator + memAllocator MemoryAllocator +} + +// NewEncoderWithAllocator returns a new encoder +func NewEncoderWithAllocator(e EncoderWithAllocator, a MemoryAllocator) Encoder { + return &encoderWithAllocator{ + encoder: e, + memAllocator: a, + } +} + +// Encode writes the provided object to the nested writer +func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { + return e.encoder.EncodeWithAllocator(obj, w, e.memAllocator) +} + +// Identifier returns identifier of this encoder. +func (e *encoderWithAllocator) Identifier() Identifier { + return e.encoder.Identifier() +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index b324b76c6..e89ea8939 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -69,6 +69,24 @@ type Encoder interface { Identifier() Identifier } +// MemoryAllocator is responsible for allocating memory. +// By encapsulating memory allocation into its own interface, we can reuse the memory +// across many operations in places we know it can significantly improve the performance. +type MemoryAllocator interface { + // Allocate reserves memory for n bytes. + // Note that implementations of this method are not required to zero the returned array. + // It is the caller's responsibility to clean the memory if needed. 
+ Allocate(n uint64) []byte +} + +// EncoderWithAllocator serializes objects in a way that allows callers to manage any additional memory allocations. +type EncoderWithAllocator interface { + Encoder + // EncodeWithAllocator writes an object to a stream as Encode does. + // In addition, it allows for providing a memory allocator for efficient memory usage during object serialization + EncodeWithAllocator(obj Object, w io.Writer, memAlloc MemoryAllocator) error +} + // Decoder attempts to load an object from data. type Decoder interface { // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the @@ -153,7 +171,7 @@ type NegotiatedSerializer interface { // EncoderForVersion returns an encoder that ensures objects being written to the provided // serializer are in the provided group version. EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder - // DecoderForVersion returns a decoder that ensures objects being read by the provided + // DecoderToVersion returns a decoder that ensures objects being read by the provided // serializer are in the provided group version by default. DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder } @@ -207,6 +225,12 @@ type NestedObjectEncoder interface { // NestedObjectDecoder is an optional interface that objects may implement to be given // an opportunity to decode any nested Objects / RawExtensions during serialization. +// It is possible for DecodeNestedObjects to return a non-nil error but for the decoding +// to have succeeded in the case of strict decoding errors (e.g. unknown/duplicate fields). +// As such it is important for callers of DecodeNestedObjects to check to confirm whether +// an error is a runtime.StrictDecodingError before short circuiting. +// Similarly, implementations of DecodeNestedObjects should ensure that a runtime.StrictDecodingError +// is only returned when the rest of decoding has succeeded. type NestedObjectDecoder interface { DecodeNestedObjects(d Decoder) error } @@ -284,14 +308,11 @@ type ResourceVersioner interface { ResourceVersion(obj Object) (string, error) } -// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object. -type SelfLinker interface { - SetSelfLink(obj Object, selfLink string) error - SelfLink(obj Object) (string, error) - - // Knowing Name is sometimes necessary to use a SelfLinker. +// Namer provides methods for retrieving name and namespace of an API object. +type Namer interface { + // Name returns the name of a given object. Name(obj Object) (string, error) - // Knowing Namespace is sometimes necessary to use a SelfLinker + // Namespace returns the name of a given object. Namespace(obj Object) (string, error) } @@ -344,4 +365,9 @@ type Unstructured interface { // error should terminate the iteration. If IsList() returns false, this method should return an error // instead of calling the provided function. EachListItem(func(Object) error) error + // EachListItemWithAlloc works like EachListItem, but avoids retaining references to a slice of items. + // It does this by making a shallow copy of non-pointer items before passing them to fn. + // + // If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. 
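The EncoderWithAllocator interface above combines with the NewEncoderWithAllocator helper added to helper.go earlier in this file roughly as follows. This is a sketch, not part of the diff; the `e` parameter stands in for any concrete EncoderWithAllocator (the in-tree protobuf serializer is one implementation), and the function name is made up.

```go
package example

import (
	"io"

	"k8s.io/apimachinery/pkg/runtime"
)

// encodeWithPooledBuffer encodes obj to w while reusing a pooled buffer for
// the serialized bytes.
func encodeWithPooledBuffer(e runtime.EncoderWithAllocator, obj runtime.Object, w io.Writer) error {
	alloc := runtime.AllocatorPool.Get().(*runtime.Allocator)
	defer runtime.AllocatorPool.Put(alloc)

	// The returned Encoder forwards Encode to EncodeWithAllocator, so every
	// object encoded through it reuses alloc's backing buffer.
	enc := runtime.NewEncoderWithAllocator(e, alloc)
	return enc.Encode(obj, w)
}
```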
+ EachListItemWithAlloc(func(Object) error) error } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 29d3ac45b..7a26d2798 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// source: k8s.io/apimachinery/pkg/runtime/schema/generated.proto package schema @@ -39,21 +39,20 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_25f8f0eed21c6089) } -var fileDescriptor_0462724132518e0d = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, - 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, - 0x5e, 0xe6, 0x24, 0x57, 0xc7, 0xb2, 0xfc, 0x47, 0x8e, 0x5d, 0xa9, 0xac, 0x8f, 0xd0, 0xc7, 0x0a, - 0x0c, 0x0c, 0x6c, 0xdc, 0x17, 0xa9, 0x64, 0x07, 0x94, 0xdd, 0x4f, 0xa7, 0xcf, 0xf7, 0xf3, 0x68, - 0xfe, 0x27, 0xa1, 0x3d, 0x9a, 0xdc, 0x51, 0x74, 0x94, 0x68, 0xc2, 0x0b, 0xb9, 0xc1, 0x47, 0xdc, - 0x1f, 0x32, 0x68, 0x2b, 0xfb, 0x51, 0x3b, 0x8a, 0x57, 0x0c, 0x46, 0x61, 0xcc, 0x2e, 0x69, 0x4b, - 0x38, 0xf5, 0x23, 0x59, 0x89, 0x8a, 0x1c, 0x45, 0x99, 0x68, 0x10, 0x21, 0xfa, 0xe4, 0xbf, 0x7e, - 0x9a, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0x44, 0x73, 0xdf, 0x7f, 0x4a, 0xa7, 0x31, 0x77, - 0xa2, 0xf7, 0x16, 0x95, 0x57, 0x1e, 0x2b, 0xef, 0xf2, 0xb9, 0xae, 0x3a, 0xea, 0xd5, 0xb2, 0x87, - 0xdf, 0x79, 0x03, 0xb6, 0x6c, 0xc0, 0xd6, 0x0d, 0xd8, 0xad, 0x00, 0x9f, 0x0b, 0xf0, 0xa5, 0x00, - 0x5f, 0x0b, 0xf0, 0x47, 0x01, 0x7e, 0x7f, 0x02, 0x3b, 0x7d, 0xb4, 0xf8, 0x2b, 0x00, 0x00, 0xff, - 0xff, 0xba, 0x7e, 0x65, 0xf4, 0xd6, 0x00, 0x00, 0x00, +var fileDescriptor_25f8f0eed21c6089 = []byte{ + // 170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xa1, 0x0e, 0xc2, 0x30, + 0x10, 0xc6, 0xf1, 0xd6, 0x22, 0x91, 0x88, 0x93, 0x73, 0xdc, 0x39, 0x82, 0x46, 0xf3, 0x04, 0xb8, + 0x6e, 0x94, 0xae, 0x59, 0xba, 0x6b, 0xba, 0x4e, 0xe0, 0x78, 0x04, 0x1e, 0x6b, 0x72, 0x72, 0x92, + 0x95, 0x17, 0x21, 0x69, 0x11, 0x48, 0xdc, 0xfd, 0xc5, 0xef, 0xf2, 0x6d, 0x0e, 0xdd, 0x71, 0x40, + 0xcb, 0xa4, 0xbc, 0x75, 0xaa, 0x69, 0x6d, 0xaf, 0xc3, 0x9d, 0x7c, 0x67, 0x28, 0x8c, 0x7d, 0xb4, + 0x4e, 0xd3, 0xd0, 0xb4, 0xda, 0x29, 0x32, 0xba, 0xd7, 0x41, 0x45, 0x7d, 0x45, 0x1f, 0x38, 0xf2, + 0xb6, 0x2a, 0x0e, 0x7f, 0x1d, 0xfa, 0xce, 0xe0, 0xd7, 0x61, 0x71, 0xbb, 0xbd, 0xb1, 0xb1, 0x1d, + 0x6b, 0x6c, 0xd8, 0x91, 0x61, 0xc3, 0x94, 0x79, 0x3d, 0xde, 0x72, 0xe5, 0xc8, 0x57, 0x79, 0x7b, + 0x3a, 0x4f, 0x2b, 0x88, 0x79, 0x05, 0xb1, 0xac, 0x20, 0x1e, 0x09, 0xe4, 0x94, 0x40, 0xce, 0x09, + 0xe4, 0x92, 0x40, 0xbe, 0x12, 0xc8, 0xe7, 0x1b, 0xc4, 0xa5, 0xfa, 0x6f, 0xf4, 0x27, 0x00, 0x00, + 0xff, 0xff, 0x97, 0xb8, 0x4d, 0x1f, 0xdd, 0x00, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto index c50766a4b..01a9c01e5 
100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -22,5 +22,5 @@ syntax = "proto2"; package k8s.io.apimachinery.pkg.runtime.schema; // Package-wide variables from generator "generated". -option go_package = "schema"; +option go_package = "k8s.io/apimachinery/pkg/runtime/schema"; diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index b21eb664e..d1c37c942 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -39,7 +39,7 @@ func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { // ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com` // and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended // but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then -// `*GroupVersionResource` is nil. +// `*GroupVersionKind` is nil. // `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind` func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) { var gvk *GroupVersionKind @@ -191,8 +191,7 @@ func (gv GroupVersion) Identifier() string { // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { for _, gvk := range kinds { if gvk.Group == gv.Group && gvk.Version == gv.Version { @@ -240,8 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // GroupVersions can be used to represent a set of desired group versions. // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. type GroupVersions []GroupVersion // Identifier implements runtime.GroupVersioner interface. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 2e62dd62b..a5b116718 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -44,11 +44,11 @@ import ( // Schemes are not expected to change at runtime and are only threadsafe after // registration is complete. type Scheme struct { - // versionMap allows one to figure out the go type of an object with + // gvkToType allows one to figure out the go type of an object with // the given version and name. gvkToType map[schema.GroupVersionKind]reflect.Type - // typeToGroupVersion allows one to find metadata for a given go object. + // typeToGVK allows one to find metadata for a given go object. // The reflect.Type we index by should *not* be a pointer. typeToGVK map[reflect.Type][]schema.GroupVersionKind @@ -64,7 +64,7 @@ type Scheme struct { // resource field labels in that version to internal version. 
fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc - // defaulterFuncs is an array of interfaces to be called with an object to provide defaulting + // defaulterFuncs is a map to funcs to be called with an object to provide defaulting // the provided object must be a pointer. defaulterFuncs map[reflect.Type]func(interface{}) @@ -118,8 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// -// every version with particular schemas. Resolve this method at that point. +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.addObservedVersion(version) s.AddKnownTypes(version, types...) @@ -142,7 +141,7 @@ func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { s.addObservedVersion(gv) for _, obj := range types { t := reflect.TypeOf(obj) - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { panic("All types must be pointers to structs.") } t = t.Elem() @@ -160,7 +159,7 @@ func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { if len(gvk.Version) == 0 { panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) } - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { panic("All types must be pointers to structs.") } t = t.Elem() @@ -463,7 +462,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( } else { // determine the incoming kinds with as few allocations as possible. t = reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { return nil, fmt.Errorf("only pointer types may be converted: %v", t) } t = t.Elem() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go new file mode 100644 index 000000000..cd78b1df2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and +// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular, +// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions. 
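A hedged sketch (not part of the diff) of the three helpers this new direct package declares below; the widget struct and its values are made up.

```go
package main

import (
	"fmt"

	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)

type widget struct {
	Name  string `json:"name"`
	Count int    `json:"count"`
}

func main() {
	// Round-trip a value through the direct CBOR helpers.
	data, err := cbor.Marshal(widget{Name: "demo", Count: 3})
	if err != nil {
		panic(err)
	}

	var out widget
	if err := cbor.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Name:demo Count:3}

	// Diagnose renders the raw bytes in human-readable CBOR diagnostic notation.
	diag, _ := cbor.Diagnose(data)
	fmt.Println(diag)
}
```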
+package direct + +import ( + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" +) + +func Marshal(src interface{}) ([]byte, error) { + return modes.Encode.Marshal(src) +} + +func Unmarshal(src []byte, dst interface{}) error { + return modes.Decode.Unmarshal(src, dst) +} + +func Diagnose(src []byte) (string, error) { + return modes.Diagnostic.Diagnose(src) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go new file mode 100644 index 000000000..f14cbd6b5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "bytes" + "sync" +) + +var buffers = BufferProvider{p: new(sync.Pool)} + +type buffer struct { + bytes.Buffer +} + +type pool interface { + Get() interface{} + Put(interface{}) +} + +type BufferProvider struct { + p pool +} + +func (b *BufferProvider) Get() *buffer { + if buf, ok := b.p.Get().(*buffer); ok { + return buf + } + return &buffer{} +} + +func (b *BufferProvider) Put(buf *buffer) { + if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ { + // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption + // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as + // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small" + // objects very frequently and much larger objects (especially large lists) only + // occasionally. Under steady load, pooled buffers tend to be borrowed frequently + // enough to prevent them from being released. Over time, each buffer is used to + // encode a large object and its capacity increases accordingly. The result is that + // practically all buffers in the pool retain much more capacity than needed to + // encode most objects. + + // As a basic mitigation for the worst case, buffers with more capacity than the + // default max request body size are never returned to the pool. + // TODO: Optimize for higher buffer utilization. + return + } + buf.Reset() + b.p.Put(buf) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go new file mode 100644 index 000000000..858529e95 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -0,0 +1,422 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "encoding" + "encoding/json" + "errors" + "fmt" + "reflect" + "sync" + + "github.com/fxamacker/cbor/v2" +) + +// Returns a non-nil error if and only if the argument's type (or one of its component types, for +// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing +// cbor.Marshaler and likewise for the respective Unmarshaler interfaces. +// +// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic +// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be +// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree +// types in client programs. +func RejectCustomMarshalers(v interface{}) error { + if v == nil { + return nil + } + rv := reflect.ValueOf(v) + if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + return nil +} + +// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the +// decoder shouldn't be able to contain cycles, but practically any object can be passed to the +// encoder. +var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)") + +// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing +// that it can might deeply nested acyclic objects. The limit will be removed along with the rest of +// this mechanism. +const maxDepth = 2048 + +var marshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Marshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Marshaler](), + reflect.TypeFor[encoding.TextMarshaler](), + }, +} + +var unmarshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Unmarshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Unmarshaler](), + reflect.TypeFor[encoding.TextUnmarshaler](), + }, + assumeAddressableValues: true, +} + +// checker wraps a function for dynamically checking a value of a specific type for custom JSON +// behaviors not matched by a custom CBOR behavior. +type checker struct { + // check returns a non-nil error if the given value might be marshalled to or from CBOR + // using the default behavior for its kind, but marshalled to or from JSON using custom + // behavior. + check func(rv reflect.Value, depth int) error + + // safe returns true if all values of this type are safe from mismatched custom marshalers. + safe func() bool +} + +// TODO: stale +// Having a single addressable checker for comparisons lets us prune and collapse parts of the +// object traversal that are statically known to be safe. Depending on the type, it may be +// unnecessary to inspect each value of that type. For example, no value of the built-in type bool +// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a +// distinct type from bool). 
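What RejectCustomMarshalers (defined above) guards against, as a sketch rather than part of the diff: modes is an internal package, so this would only compile as a test inside the apimachinery module, and the jsonOnly type is made up.

```go
package modes_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
)

// jsonOnly has custom JSON marshaling but no MarshalCBOR, the exact mismatch
// the restriction is meant to catch.
type jsonOnly struct{}

func (jsonOnly) MarshalJSON() ([]byte, error) { return []byte(`{}`), nil }

func TestRejectCustomMarshalers(t *testing.T) {
	// Plain unstructured content has no custom marshalers and passes.
	if err := modes.RejectCustomMarshalers(map[string]interface{}{"a": "b"}); err != nil {
		t.Fatal(err)
	}
	// A type implementing json.Marshaler without cbor.Marshaler is rejected.
	if err := modes.RejectCustomMarshalers(jsonOnly{}); err == nil {
		t.Fatal("expected jsonOnly to be rejected")
	}
}
```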
+var noop = checker{ + safe: func() bool { + return true + }, + check: func(rv reflect.Value, depth int) error { + return nil + }, +} + +type checkers struct { + m sync.Map // reflect.Type => *checker + + cborInterface reflect.Type + nonCBORInterfaces []reflect.Type + + assumeAddressableValues bool +} + +func (cache *checkers) getChecker(rt reflect.Type) checker { + if ptr, ok := cache.m.Load(rt); ok { + return *ptr.(*checker) + } + + return cache.getCheckerInternal(rt, nil) +} + +// linked list node representing the path from a composite type to an element type +type path struct { + Type reflect.Type + Parent *path +} + +func (p path) cyclic(rt reflect.Type) bool { + for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent { + if ancestor.Type == rt { + return true + } + } + return false +} + +func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) { + // Store a placeholder cache entry first to handle cyclic types. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + c = checker{ + safe: func() bool { + wg.Wait() + return c.safe() + }, + check: func(rv reflect.Value, depth int) error { + wg.Wait() + return c.check(rv, depth) + }, + } + if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + // Someone else stored an entry for this type, use it. + return *actual.(*checker) + } + + // Take a nonreflective path for the unstructured container types. They're common and + // usually nested inside one another. + switch rt { + case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}](): + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + return checkUnstructuredValue(cache, rv.Interface(), depth) + }, + } + } + + // It's possible that one of the relevant interfaces is implemented on a type with a pointer + // receiver, but that a particular value of that type is not addressable. For example: + // + // func (Foo) MarshalText() ([]byte, error) { ... } + // func (*Foo) MarshalCBOR() ([]byte, error) { ... } + // + // Both methods are in the method set of *Foo, but the method set of Foo contains only + // MarshalText. + // + // Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text + // interface with a pointer receiver are always accessible. Only the unmarshaler check + // assumes that CBOR methods with pointer receivers are accessible. 
+ + if rt.Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if rt.Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if reflect.PointerTo(rt).Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + self := &path{Type: rt, Parent: parent} + + switch rt.Kind() { + case reflect.Array: + ce := cache.getCheckerInternal(rt.Elem(), self) + rtlen := rt.Len() + if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rtlen; i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Interface: + // All interface values have to be checked because their dynamic type might + // implement one of the interesting interfaces or be composed of another type that + // does. + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + // Unpacking interfaces must count against recursion depth, + // consider this cycle: + // > var i interface{} + // > var p *interface{} = &i + // > i = p + // > rv := reflect.ValueOf(i) + // > for { + // > rv = rv.Elem() + // > } + if depth <= 0 { + return errMaxDepthExceeded + } + rv = rv.Elem() + return cache.getChecker(rv.Type()).check(rv, depth-1) + }, + } + + case reflect.Map: + rtk := rt.Key() + ck := cache.getCheckerInternal(rtk, self) + rte := rt.Elem() + ce := cache.getCheckerInternal(rte, self) + if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := ck.check(rvk, depth-1); err != nil { + return err + } + rve.SetIterValue(iter) + if err := ce.check(rve, depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Pointer: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + if depth <= 0 { + return errMaxDepthExceeded + } + return ce.check(rv.Elem(), depth-1) + }, + } + + case reflect.Slice: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rv.Len(); i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return 
err + } + } + return nil + }, + } + + case reflect.Struct: + type field struct { + Index int + Checker checker + } + var fields []field + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + cf := cache.getCheckerInternal(f.Type, self) + if !self.cyclic(f.Type) && cf.safe() { + continue + } + fields = append(fields, field{Index: i, Checker: cf}) + } + if len(fields) == 0 { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for _, fi := range fields { + if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil { + return err + } + } + return nil + }, + } + + default: + // Not a serializable composite type (funcs and channels are composite types but are + // rejected by JSON and CBOR serialization). + return noop + + } +} + +func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error { + switch v := v.(type) { + case nil, bool, int64, float64, string: + return nil + case []interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + case map[string]interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + default: + // Unmarshaling an unstructured doesn't use other dynamic types, but nothing + // prevents inserting values with arbitrary dynamic types into unstructured content, + // as long as they can be marshalled. + rv := reflect.ValueOf(v) + return cache.getChecker(rv.Type()).check(rv, depth) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go new file mode 100644 index 000000000..895b0deff --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go @@ -0,0 +1,158 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "reflect" + + "github.com/fxamacker/cbor/v2" +) + +var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry { + var opts []func(*cbor.SimpleValueRegistry) error + for sv := 0; sv <= 255; sv++ { + // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved + // and considered ill-formed by the CBOR specification. We only accept false (20), + // true (21), and null (22). + switch sv { + case 20: // false + case 21: // true + case 22: // null + case 24, 25, 26, 27, 28, 29, 30, 31: // reserved + default: + opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv))) + } + } + simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...) 
+ if err != nil { + panic(err) + } + return simpleValues +}() + +var Decode cbor.DecMode = func() cbor.DecMode { + decode, err := cbor.DecOptions{ + // Maps with duplicate keys are well-formed but invalid according to the CBOR spec + // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map + // keys are rejected outright and not surfaced as a strict decoding error. + DupMapKey: cbor.DupMapKeyEnforcedAPF, + + // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted + // with or without tagging. If a tag number is present, it must be valid. + TimeTag: cbor.DecTagOptional, + + // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit + // is 10000. + MaxNestedLevels: 64, + + MaxArrayElements: 1024, + MaxMapPairs: 1024, + + // Indefinite-length sequences aren't produced by this serializer, but other + // implementations can. + IndefLength: cbor.IndefLengthAllowed, + + // Accept inputs that contain CBOR tags. + TagsMd: cbor.TagsAllowed, + + // Decode type 0 (unsigned integer) as int64. + // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64. + IntDec: cbor.IntDecConvertSignedOrFail, + + // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for + // decodes into interface{}. + MapKeyByteString: cbor.MapKeyByteStringForbidden, + + // Error on map keys that don't map to a field in the destination struct. + ExtraReturnErrors: cbor.ExtraDecErrorUnknownField, + + // Decode maps into concrete type map[string]interface{} when the destination is an + // interface{}. + DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)), + + // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but + // invalid according to the CBOR spec. Reject invalid inputs. Encoders are + // responsible for ensuring that all text strings they produce contain valid UTF-8 + // sequences and may use the byte string major type to encode strings that have not + // been validated. + UTF8: cbor.UTF8RejectInvalid, + + // Never make a case-insensitive match between a map key and a struct field. + FieldNameMatching: cbor.FieldNameMatchingCaseSensitive, + + // Produce string concrete values when decoding a CBOR byte string into interface{}. + DefaultByteStringType: reflect.TypeOf(""), + + // Allow CBOR byte strings to be decoded into string destination values. If a byte + // string is enclosed in an "expected later encoding" tag + // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text + // encoding indicated by that tag (e.g. base64) will be applied to the contents of + // the byte string. + ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding, + + // Allow CBOR byte strings to match struct fields when appearing as a map key. + FieldNameByteString: cbor.FieldNameByteStringAllowed, + + // When decoding an unrecognized tag to interface{}, return the decoded tag content + // instead of the default, a cbor.Tag representing a (number, content) pair. + UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny, + + // Decode time tags to interface{} as strings containing RFC 3339 timestamps. + TimeTagToAny: cbor.TimeTagToRFC3339Nano, + + // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339 + // timestamps. + ByteStringToTime: cbor.ByteStringToTimeAllowed, + + // Reject NaN and infinite floating-point values since they don't have a JSON + // representation (RFC 8259 Section 6). 
+ NaN: cbor.NaNDecodeForbidden, + Inf: cbor.InfDecodeForbidden, + + // When unmarshaling a byte string into a []byte, assume that the byte string + // contains base64-encoded bytes, unless explicitly counterindicated by an "expected + // later encoding" tag. This is consistent with the because of unmarshaling a JSON + // text into a []byte. + ByteStringExpectedFormat: cbor.ByteStringExpectedBase64, + + // Reject the arbitrary-precision integer tags because they can't be faithfully + // roundtripped through the allowable Unstructured types. + BignumTag: cbor.BignumTagForbidden, + + // Reject anything other than the simple values true, false, and null. + SimpleValues: simpleValues, + + // Disable default recognition of types implementing encoding.BinaryUnmarshaler, + // which is not recognized for JSON decoding. + BinaryUnmarshaler: cbor.BinaryUnmarshalerNone, + }.DecMode() + if err != nil { + panic(err) + } + return decode +}() + +// DecodeLax is derived from Decode, but does not complain about unknown fields in the input. +var DecodeLax cbor.DecMode = func() cbor.DecMode { + opts := Decode.DecOptions() + opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit + dm, err := opts.DecMode() + if err != nil { + panic(err) + } + return dm +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go new file mode 100644 index 000000000..61f3f145f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "github.com/fxamacker/cbor/v2" +) + +var Diagnostic cbor.DiagMode = func() cbor.DiagMode { + opts := Decode.DecOptions() + diagnostic, err := cbor.DiagOptions{ + ByteStringText: true, + + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.DiagMode() + if err != nil { + panic(err) + } + return diagnostic +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go new file mode 100644 index 000000000..c66931384 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go @@ -0,0 +1,155 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package modes + +import ( + "io" + + "github.com/fxamacker/cbor/v2" +) + +var Encode = EncMode{ + delegate: func() cbor.UserBufferEncMode { + encode, err := cbor.EncOptions{ + // Map keys need to be sorted to have deterministic output, and this is the order + // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements". + Sort: cbor.SortBytewiseLexical, + + // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store + // floats in the smallest width that preserves value so that equivalent float32 and + // float64 values encode to identical bytes, as they do in a JSON + // encoding. Satisfies one of the "Core Deterministic Encoding Requirements". + ShortestFloat: cbor.ShortestFloat16, + + // Error on attempt to encode NaN and infinite values. This is what the JSON + // serializer does. + NaNConvert: cbor.NaNConvertReject, + InfConvert: cbor.InfConvertReject, + + // Error on attempt to encode math/big.Int values, which can't be faithfully + // roundtripped through Unstructured in general (the dynamic numeric types allowed + // in Unstructured are limited to float64 and int64). + BigIntConvert: cbor.BigIntConvertReject, + + // MarshalJSON for time.Time writes RFC3339 with nanos. + Time: cbor.TimeRFC3339Nano, + + // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by + // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no + // reliable way of knowing that a particular string originated from serializing a + // time.Time), so producing tag 0 has little use. + TimeTag: cbor.EncTagNone, + + // Indefinite-length items have multiple encodings and aren't being used anyway, so + // disable to avoid an opportunity for nondeterminism. + IndefLength: cbor.IndefLengthForbidden, + + // Preserve distinction between nil and empty for slices and maps. + NilContainers: cbor.NilContainerAsNull, + + // OK to produce tags. + TagsMd: cbor.TagsAllowed, + + // Use the same definition of "empty" as encoding/json. + OmitEmpty: cbor.OmitEmptyGoValue, + + // The CBOR types text string and byte string are structurally equivalent, with the + // semantic difference that a text string whose content is an invalid UTF-8 sequence + // is itself invalid. We reject all invalid text strings at decode time and do not + // validate or sanitize all Go strings at encode time. Encoding Go strings to the + // byte string type is comparable to the existing Protobuf behavior and cheaply + // ensures that the output is valid CBOR. + String: cbor.StringToByteString, + + // Encode struct field names to the byte string type rather than the text string + // type. + FieldName: cbor.FieldNameToByteString, + + // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte + // strings. + ByteArray: cbor.ByteArrayToArray, + + // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64 + // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to + // interoperate with the existing JSON behavior. This indicates to the decoder that, + // when decoding into a string (or unstructured), the resulting value should be the + // base64 encoding of the original bytes. No base64 encoding or decoding needs to be + // performed for []byte-to-CBOR-to-[]byte roundtrips. + ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64, + + // Disable default recognition of types implementing encoding.BinaryMarshaler, which + // is not recognized for JSON encoding. 
+ BinaryMarshaler: cbor.BinaryMarshalerNone, + }.UserBufferEncMode() + if err != nil { + panic(err) + } + return encode + }(), +} + +var EncodeNondeterministic = EncMode{ + delegate: func() cbor.UserBufferEncMode { + opts := Encode.options() + opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0. + em, err := opts.UserBufferEncMode() + if err != nil { + panic(err) + } + return em + }(), +} + +type EncMode struct { + delegate cbor.UserBufferEncMode +} + +func (em EncMode) options() cbor.EncOptions { + return em.delegate.EncOptions() +} + +func (em EncMode) MarshalTo(v interface{}, w io.Writer) error { + if buf, ok := w.(*buffer); ok { + return em.delegate.MarshalToBuffer(v, &buf.Buffer) + } + + buf := buffers.Get() + defer buffers.Put(buf) + if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil { + return err + } + + if _, err := io.Copy(w, buf); err != nil { + return err + } + + return nil +} + +func (em EncMode) Marshal(v interface{}) ([]byte, error) { + buf := buffers.Get() + defer buffers.Put(buf) + + if err := em.MarshalTo(v, &buf.Buffer); err != nil { + return nil, err + } + + clone := make([]byte, buf.Len()) + copy(clone, buf.Bytes()) + + return clone, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 21944f2d8..ff9820842 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -259,8 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. -// -// All other callers will be forced to request a Codec directly. +// All other callers will be forced to request a Codec directly. func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 6c082f660..1ae4a32eb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -166,7 +166,20 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i strictErrs, err := s.unmarshal(into, data, originalData) if err != nil { return nil, actual, err - } else if len(strictErrs) > 0 { + } + + // when decoding directly into a provided unstructured object, + // extract the actual gvk decoded from the provided data, + // and ensure it is non-empty. 
+ if isUnstructured { + *actual = into.GetObjectKind().GroupVersionKind() + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr(string(originalData)) + } + // TODO(109023): require apiVersion here as well once unstructuredJSONScheme#Decode does + } + + if len(strictErrs) > 0 { return into, actual, runtime.NewStrictDecodingError(strictErrs) } return into, actual, nil @@ -261,9 +274,9 @@ func (s *Serializer) unmarshal(into runtime.Object, data, originalData []byte) ( var strictJSONErrs []error if u, isUnstructured := into.(runtime.Unstructured); isUnstructured { // Unstructured is a custom unmarshaler that gets delegated - // to, so inorder to detect strict JSON errors we need + // to, so in order to detect strict JSON errors we need // to unmarshal directly into the object. - m := u.UnstructuredContent() + m := map[string]interface{}{} strictJSONErrs, err = kjson.UnmarshalStrict(data, &m) u.SetUnstructuredContent(m) } else { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index 8358d77c3..c63e6dc63 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/util/framer" + "k8s.io/klog/v2" ) var ( @@ -86,6 +87,7 @@ type Serializer struct { } var _ runtime.Serializer = &Serializer{} +var _ runtime.EncoderWithAllocator = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} const serializerIdentifier runtime.Identifier = "protobuf" @@ -161,22 +163,36 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw) } +// EncodeWithAllocator writes an object to the provided writer. +// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization. +func (s *Serializer) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + return s.encode(obj, w, memAlloc) +} + // Encode serializes the provided object to the given writer. 
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + return s.encode(obj, w, &runtime.SimpleAllocator{}) +} + +func (s *Serializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(s.Identifier(), s.doEncode, w) + return co.CacheEncode(s.Identifier(), func(obj runtime.Object, w io.Writer) error { return s.doEncode(obj, w, memAlloc) }, w) } - return s.doEncode(obj, w) + return s.doEncode(obj, w, memAlloc) } -func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { +func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + if memAlloc == nil { + klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator") + memAlloc = &runtime.SimpleAllocator{} + } prefixSize := uint64(len(s.prefix)) var unk runtime.Unknown switch t := obj.(type) { case *runtime.Unknown: estimatedSize := prefixSize + uint64(t.Size()) - data := make([]byte, estimatedSize) + data := memAlloc.Allocate(estimatedSize) i, err := t.MarshalTo(data[prefixSize:]) if err != nil { return err @@ -196,11 +212,11 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case bufferedMarshaller: - // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalToSizedBuffer methods + // this path performs a single allocation during write only when the Allocator wasn't provided + // it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods encodedSize := uint64(t.Size()) estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) - data := make([]byte, estimatedSize) + data := memAlloc.Allocate(estimatedSize) i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize) if err != nil { @@ -221,7 +237,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { unk.Raw = data estimatedSize := prefixSize + uint64(unk.Size()) - data = make([]byte, estimatedSize) + data = memAlloc.Allocate(estimatedSize) i, err := unk.MarshalTo(data[prefixSize:]) if err != nil { @@ -395,19 +411,33 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, // Encode serializes the provided object to the given writer. Overrides is ignored. func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { + return s.encode(obj, w, &runtime.SimpleAllocator{}) +} + +// EncodeWithAllocator writes an object to the provided writer. +// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization. 
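(Editor's aside, not part of the diff: a hedged caller-side sketch of how the EncodeWithAllocator methods added here might be used. encodeAll, reusingAllocator, enc, objs, and w are assumptions; only runtime.EncoderWithAllocator, runtime.Object, and runtime.MemoryAllocator come from the apimachinery runtime package, and the usual io and k8s.io/apimachinery/pkg/runtime imports are assumed.)

	// reusingAllocator is a minimal MemoryAllocator that hands back the same
	// backing array whenever it is already large enough.
	type reusingAllocator struct{ buf []byte }

	func (a *reusingAllocator) Allocate(n uint64) []byte {
		if uint64(cap(a.buf)) >= n {
			a.buf = a.buf[:n]
			return a.buf
		}
		a.buf = make([]byte, n)
		return a.buf
	}

	// encodeAll reuses one allocator across many encodes so the serializer can
	// recycle a single scratch buffer instead of allocating per object.
	func encodeAll(enc runtime.EncoderWithAllocator, objs []runtime.Object, w io.Writer) error {
		memAlloc := &reusingAllocator{}
		for _, obj := range objs {
			if err := enc.EncodeWithAllocator(obj, w, memAlloc); err != nil {
				return err
			}
		}
		return nil
	}
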
+func (s *RawSerializer) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + return s.encode(obj, w, memAlloc) +} + +func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(s.Identifier(), s.doEncode, w) + return co.CacheEncode(s.Identifier(), func(obj runtime.Object, w io.Writer) error { return s.doEncode(obj, w, memAlloc) }, w) } - return s.doEncode(obj, w) + return s.doEncode(obj, w, memAlloc) } -func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error { +func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + if memAlloc == nil { + klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator") + memAlloc = &runtime.SimpleAllocator{} + } switch t := obj.(type) { case bufferedReverseMarshaller: - // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalToSizedBuffer methods + // this path performs a single allocation during write only when the Allocator wasn't provided + // it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods encodedSize := uint64(t.Size()) - data := make([]byte, encodedSize) + data := memAlloc.Allocate(encodedSize) n, err := t.MarshalToSizedBuffer(data) if err != nil { @@ -417,10 +447,10 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error { return err case bufferedMarshaller: - // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalTo methods + // this path performs a single allocation during write only when the Allocator wasn't provided + // it also requires the caller to implement the more efficient Size and MarshalTo methods encodedSize := uint64(t.Size()) - data := make([]byte, encodedSize) + data := memAlloc.Allocate(encodedSize) n, err := t.MarshalTo(data) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index ea7c580bd..25f955ed7 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -89,6 +89,8 @@ type codec struct { originalSchemeName string } +var _ runtime.EncoderWithAllocator = &codec{} + var identifiersMap sync.Map type codecIdentifier struct { @@ -133,24 +135,34 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } } - var strictDecodingErr error + var strictDecodingErrs []error obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) if err != nil { - if obj != nil && runtime.IsStrictDecodingError(err) { - // save the strictDecodingError and the caller decide what to do with it - strictDecodingErr = err + if strictErr, ok := runtime.AsStrictDecodingError(err); obj != nil && ok { + // save the strictDecodingError and let the caller decide what to do with it + strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...) 
} else { return nil, gvk, err } } - // TODO: look into strict handling of nested object decoding if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { - return nil, gvk, err + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{Decoder: c.decoder}); err != nil { + if strictErr, ok := runtime.AsStrictDecodingError(err); ok { + // save the strictDecodingError let and the caller decide what to do with it + strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...) + } else { + return nil, gvk, err + + } } } + // aggregate the strict decoding errors into one + var strictDecodingErr error + if len(strictDecodingErrs) > 0 { + strictDecodingErr = runtime.NewStrictDecodingError(strictDecodingErrs) + } // if we specify a target, use generic conversion. if into != nil { // perform defaulting if requested @@ -182,19 +194,40 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru return out, gvk, strictDecodingErr } +// EncodeWithAllocator ensures the provided object is output in the appropriate group and version, invoking +// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. +// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization. +func (c *codec) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + return c.encode(obj, w, memAlloc) +} + // Encode ensures the provided object is output in the appropriate group and version, invoking // conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. func (c *codec) Encode(obj runtime.Object, w io.Writer) error { + return c.encode(obj, w, nil) +} + +func (c *codec) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(c.Identifier(), c.doEncode, w) + return co.CacheEncode(c.Identifier(), func(obj runtime.Object, w io.Writer) error { return c.doEncode(obj, w, memAlloc) }, w) } - return c.doEncode(obj, w) + return c.doEncode(obj, w, memAlloc) } -func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { +func (c *codec) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + encodeFn := c.encoder.Encode + if memAlloc != nil { + if encoder, supportsAllocator := c.encoder.(runtime.EncoderWithAllocator); supportsAllocator { + encodeFn = func(obj runtime.Object, w io.Writer) error { + return encoder.EncodeWithAllocator(obj, w, memAlloc) + } + } else { + klog.V(6).Infof("a memory allocator was provided but the encoder %s doesn't implement the runtime.EncoderWithAllocator, using regular encoder.Encode method", c.encoder.Identifier()) + } + } switch obj := obj.(type) { case *runtime.Unknown: - return c.encoder.Encode(obj, w) + return encodeFn(obj, w) case runtime.Unstructured: // An unstructured list can contain objects of multiple group version kinds. don't short-circuit just // because the top-level type matches our desired destination type. 
actually send the object to the converter @@ -203,14 +236,14 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { // avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl) objGVK := obj.GetObjectKind().GroupVersionKind() if len(objGVK.Version) == 0 { - return c.encoder.Encode(obj, w) + return encodeFn(obj, w) } targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK}) if !ok { return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion) } if targetGVK == objGVK { - return c.encoder.Encode(obj, w) + return encodeFn(obj, w) } } } @@ -232,7 +265,7 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { } } objectKind.SetGroupVersionKind(gvks[0]) - return c.encoder.Encode(obj, w) + return encodeFn(obj, w) } // Perform a conversion if necessary @@ -248,7 +281,7 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { } // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object - return c.encoder.Encode(out, w) + return encodeFn(out, w) } // Identifier implements runtime.Encoder interface. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/splice.go b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go new file mode 100644 index 000000000..2badb7b97 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go @@ -0,0 +1,76 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "io" +) + +// Splice is the interface that wraps the Splice method. +// +// Splice moves data from given slice without copying the underlying data for +// efficiency purpose. Therefore, the caller should make sure the underlying +// data is not changed later. +type Splice interface { + Splice([]byte) + io.Writer + Reset() + Bytes() []byte +} + +// A spliceBuffer implements Splice and io.Writer interfaces. +type spliceBuffer struct { + raw []byte + buf *bytes.Buffer +} + +func NewSpliceBuffer() Splice { + return &spliceBuffer{} +} + +// Splice implements the Splice interface. +func (sb *spliceBuffer) Splice(raw []byte) { + sb.raw = raw +} + +// Write implements the io.Writer interface. +func (sb *spliceBuffer) Write(p []byte) (n int, err error) { + if sb.buf == nil { + sb.buf = &bytes.Buffer{} + } + return sb.buf.Write(p) +} + +// Reset resets the buffer to be empty. +func (sb *spliceBuffer) Reset() { + if sb.buf != nil { + sb.buf.Reset() + } + sb.raw = nil +} + +// Bytes returns the data held by the buffer. 
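(Editor's aside, hypothetical caller code rather than part of the diff: the point of Splice is to hand an already-serialized payload to the buffer without copying, while anything else still goes through the ordinary Write path; Bytes then returns whichever was provided. payloadFor, enc, obj, and cached are assumptions.)

	func payloadFor(obj runtime.Object, enc runtime.Encoder, cached []byte) ([]byte, error) {
		sb := runtime.NewSpliceBuffer()
		if cached != nil {
			sb.Splice(cached) // no copy; cached must not be mutated while sb is in use
		} else if err := enc.Encode(obj, sb); err != nil { // a Splice is also an io.Writer
			return nil, err
		}
		return sb.Bytes(), nil
	}
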
+func (sb *spliceBuffer) Bytes() []byte { + if sb.buf != nil && len(sb.buf.Bytes()) > 0 { + return sb.buf.Bytes() + } + if sb.raw != nil { + return sb.raw + } + return []byte{} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 3dc9a5a2f..1680c149f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -46,6 +46,7 @@ const ( ContentTypeJSON string = "application/json" ContentTypeYAML string = "application/yaml" ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeCBOR string = "application/cbor" ) // RawExtension is used to hold extensions in external versions. @@ -123,7 +124,7 @@ type Unknown struct { // Raw will hold the complete serialized object which couldn't be matched // with a registered type. Most likely, nothing should be done with this // except for passing it through the system. - Raw []byte `protobuf:"bytes,2,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,2,opt,name=raw"` // ContentEncoding is encoding used to encode 'Raw' data. // Unspecified means no encoding. ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index b19750f3a..db18ce1ce 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -37,3 +37,14 @@ const ( func (n NamespacedName) String() string { return n.Namespace + string(Separator) + n.Name } + +// MarshalLog emits a struct containing required key/value pair +func (n NamespacedName) MarshalLog() interface{} { + return struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` + }{ + Name: n.Name, + Namespace: n.Namespace, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index 0d2f153bf..1396274c7 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -40,6 +40,13 @@ func NewExpiringWithClock(clock clock.Clock) *Expiring { // Expiring is a map whose entries expire after a per-entry timeout. type Expiring struct { + // AllowExpiredGet causes the expiration check to be skipped on Get. + // It should only be used when a key always corresponds to the exact same value. + // Thus when this field is true, expired keys are considered valid + // until the next call to Set (which causes the GC to run). + // It may not be changed concurrently with calls to Get. + AllowExpiredGet bool + clock clock.Clock // mu protects the below fields @@ -70,7 +77,10 @@ func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { c.mu.RLock() defer c.mu.RUnlock() e, ok := c.cache[key] - if !ok || !c.clock.Now().Before(e.expiry) { + if !ok { + return nil, false + } + if !c.AllowExpiredGet && !c.clock.Now().Before(e.expiry) { return nil, false } return e.val, true diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go index 1328dd612..ad486d580 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -136,6 +136,19 @@ func (c *LRUExpireCache) Remove(key interface{}) { delete(c.entries, key) } +// RemoveAll removes all keys that match predicate. 
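(Editor's aside, not part of the diff: RemoveAll lets a caller evict a whole family of entries in one locked pass. The key scheme below is an assumption; it presumes the cache was populated with string keys such as "sa/<namespace>/<name>" and that strings is imported.)

	cache.RemoveAll(func(key any) bool {
		k, ok := key.(string)
		return ok && strings.HasPrefix(k, "sa/team-a/")
	})
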
+func (c *LRUExpireCache) RemoveAll(predicate func(key any) bool) { + c.lock.Lock() + defer c.lock.Unlock() + + for key, element := range c.entries { + if predicate(key) { + c.evictionList.Remove(element) + delete(c.entries, key) + } + } +} + // Keys returns all unexpired keys in the cache. // // Keep in mind that subsequent calls to Get() for any of the returned keys diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index fa9ffa51b..fc0301844 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -23,34 +23,20 @@ import ( "strings" "text/tabwriter" - "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/dump" ) -// StringDiff diffs a and b and returns a human readable diff. -func StringDiff(a, b string) string { - ba := []byte(a) - bb := []byte(b) - out := []byte{} - i := 0 - for ; i < len(ba) && i < len(bb); i++ { - if ba[i] != bb[i] { - break - } - out = append(out, ba[i]) - } - out = append(out, []byte("\n\nA: ")...) - out = append(out, ba[i:]...) - out = append(out, []byte("\n\nB: ")...) - out = append(out, bb[i:]...) - out = append(out, []byte("\n\n")...) - return string(out) -} - func legacyDiff(a, b interface{}) string { return cmp.Diff(a, b) } +// StringDiff diffs a and b and returns a human readable diff. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func StringDiff(a, b string) string { + return legacyDiff(a, b) +} + // ObjectDiff prints the diff of two go objects and fails if the objects // contain unhandled unexported fields. // DEPRECATED: use github.com/google/go-cmp/cmp.Diff @@ -75,13 +61,8 @@ func ObjectReflectDiff(a, b interface{}) string { // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, // enabling easy visual scanning for mismatches. func ObjectGoPrintSideBySide(a, b interface{}) string { - s := spew.ConfigState{ - Indent: " ", - // Extra deep spew. - DisableMethods: true, - } - sA := s.Sdump(a) - sB := s.Sdump(b) + sA := dump.Pretty(a) + sB := dump.Pretty(b) linesA := strings.Split(sA, "\n") linesB := strings.Split(sB, "\n") @@ -135,7 +116,7 @@ func IgnoreUnset() cmp.Option { if v2.Len() == 0 { return true } - case reflect.Interface, reflect.Ptr: + case reflect.Interface, reflect.Pointer: if v2.IsNil() { return true } diff --git a/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go new file mode 100644 index 000000000..cf61ef76a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dump + +import ( + "github.com/davecgh/go-spew/spew" +) + +var prettyPrintConfig = &spew.ConfigState{ + Indent: " ", + DisableMethods: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// The config MUST NOT be changed because that could change the result of a hash operation +var prettyPrintConfigForHash = &spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// Pretty wrap the spew.Sdump with Indent, and disabled methods like error() and String() +// The output may change over time, so for guaranteed output please take more direct control +func Pretty(a interface{}) string { + return prettyPrintConfig.Sdump(a) +} + +// ForHash keeps the original Spew.Sprintf format to ensure the same checksum +func ForHash(a interface{}) string { + return prettyPrintConfigForHash.Sprintf("%#v", a) +} + +// OneLine outputs the object in one line +func OneLine(a interface{}) string { + return prettyPrintConfig.Sprintf("%#v", a) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 1f5a04fd4..1b60d145c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -214,7 +214,7 @@ func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { return NewAggregate(result) } -// Reduce will return err or, if err is an Aggregate and only has one item, +// Reduce will return err or nil, if err is an Aggregate and only has one item, // the first item in the aggregate. func Reduce(err error) error { if agg, ok := err.(Aggregate); ok && err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index ca08f8561..1ab8fd396 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -32,7 +32,7 @@ func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { return &lengthDelimitedFrameWriter{w: w} } -// Write writes a single frame to the nested writer, prepending it with the length in +// Write writes a single frame to the nested writer, prepending it with the length // in bytes of data (as a 4 byte, bigendian uint32). func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) @@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see // data written to data, or be larger than data and a different array. - n := len(data) m := json.RawMessage(data[:0]) if err := r.decoder.Decode(&m); err != nil { return 0, err @@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // If capacity of data is less than length of the message, decoder will allocate a new slice // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. - if len(m) > n { - //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. - data = append(data[0:0], m[:n]...) 
- r.remaining = m[n:] - return n, io.ErrShortBuffer + if len(m) > cap(data) { + copy(data, m) + r.remaining = m[len(data):] + return len(data), io.ErrShortBuffer } + + if len(m) > len(data) { + // The bytes beyond len(data) were stored in data's underlying array, which we do + // not own after this function returns. + r.remaining = append([]byte(nil), m[len(data):]...) + return len(data), io.ErrShortBuffer + } + return len(m), nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index a4792034a..1f2877399 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// source: k8s.io/apimachinery/pkg/util/intstr/generated.proto package intstr @@ -43,7 +43,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *IntOrString) Reset() { *m = IntOrString{} } func (*IntOrString) ProtoMessage() {} func (*IntOrString) Descriptor() ([]byte, []int) { - return fileDescriptor_94e046ae3ce6121c, []int{0} + return fileDescriptor_771bacc35a5ec189, []int{0} } func (m *IntOrString) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,30 +73,29 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) + proto.RegisterFile("k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_771bacc35a5ec189) } -var fileDescriptor_94e046ae3ce6121c = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 
0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, +var fileDescriptor_771bacc35a5ec189 = []byte{ + // 277 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xce, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2d, 0xc9, 0xcc, 0xd1, 0xcf, 0xcc, 0x2b, 0x29, 0x2e, 0x29, 0xd2, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x52, 0x86, 0x68, 0xd2, 0x43, 0xd6, 0xa4, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd2, 0xa4, 0x07, 0xd1, + 0x24, 0xa5, 0x9b, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x9e, 0x9f, + 0x9e, 0xaf, 0x0f, 0xd6, 0x9b, 0x54, 0x9a, 0x06, 0xe6, 0x81, 0x39, 0x60, 0x16, 0xc4, 0x4c, 0xa5, + 0x89, 0x8c, 0x5c, 0xdc, 0x9e, 0x79, 0x25, 0xfe, 0x45, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x42, + 0x1a, 0x5c, 0x2c, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x4e, 0x22, 0x27, + 0xee, 0xc9, 0x33, 0x3c, 0xba, 0x27, 0xcf, 0x12, 0x52, 0x59, 0x90, 0xfa, 0x0b, 0x4a, 0x07, 0x81, + 0x55, 0x08, 0xa9, 0x71, 0xb1, 0x65, 0xe6, 0x95, 0x84, 0x25, 0xe6, 0x48, 0x30, 0x29, 0x30, 0x6a, + 0xb0, 0x3a, 0xf1, 0x41, 0xd5, 0xb2, 0x79, 0x82, 0x45, 0x83, 0xa0, 0xb2, 0x20, 0x75, 0xc5, 0x25, + 0x45, 0x20, 0x75, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x08, 0x75, 0xc1, 0x60, 0xd1, 0x20, 0xa8, 0xac, + 0x15, 0xc7, 0x8c, 0x05, 0xf2, 0x0c, 0x0d, 0x77, 0x14, 0x18, 0x9c, 0x3c, 0x4f, 0x3c, 0x94, 0x63, + 0xb8, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, + 0x31, 0x5e, 0x78, 0x24, 0xc7, 0x78, 0xe3, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, + 0xcb, 0x31, 0x44, 0x29, 0x13, 0x11, 0x84, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0xa1, 0x0b, + 0x1e, 0x68, 0x01, 0x00, 0x00, } func (m *IntOrString) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index a76f79851..7c63c5e45 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -22,7 +22,7 @@ syntax = "proto2"; package k8s.io.apimachinery.pkg.util.intstr; // Package-wide variables from generator "generated". -option go_package = "intstr"; +option go_package = "k8s.io/apimachinery/pkg/util/intstr"; // IntOrString is a type that can hold an int32 or a string. When used in // JSON or YAML marshalling and unmarshalling, it produces or consumes the diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index c0e8927fe..5fd2e16c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/klog/v2" ) @@ -54,7 +55,7 @@ const ( // FromInt creates an IntOrString object with an int32 value. It is // your responsibility not to call this method with a value greater // than int32. -// TODO: convert to (val int32) +// Deprecated: use FromInt32 instead. func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) @@ -62,19 +63,24 @@ func FromInt(val int) IntOrString { return IntOrString{Type: Int, IntVal: int32(val)} } +// FromInt32 creates an IntOrString object with an int32 value. 
+func FromInt32(val int32) IntOrString { + return IntOrString{Type: Int, IntVal: val} +} + // FromString creates an IntOrString object with a string value. func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } -// Parse the given string and try to convert it to an integer before +// Parse the given string and try to convert it to an int32 integer before // setting it as a string value. func Parse(val string) IntOrString { - i, err := strconv.Atoi(val) + i, err := strconv.ParseInt(val, 10, 32) if err != nil { return FromString(val) } - return FromInt(i) + return FromInt32(int32(i)) } // UnmarshalJSON implements the json.Unmarshaller interface. @@ -87,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { return json.Unmarshal(value, &intstr.IntVal) } +func (intstr *IntOrString) UnmarshalCBOR(value []byte) error { + if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil { + intstr.Type = String + return nil + } + + if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil { + return err + } + + intstr.Type = Int + return nil +} + // String returns the string value, or the Itoa of the int value. func (intstr *IntOrString) String() string { if intstr == nil { @@ -121,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } +func (intstr IntOrString) MarshalCBOR() ([]byte, error) { + switch intstr.Type { + case Int: + return cbor.Marshal(intstr.IntVal) + case String: + return cbor.Marshal(intstr.StrVal) + default: + return nil, fmt.Errorf("impossible IntOrString.Type") + } +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // @@ -131,6 +162,10 @@ func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // the OpenAPI spec of this type. func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } +// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing +// the OpenAPI v3 spec of this type. +func (IntOrString) OpenAPIV3OneOfTypes() []string { return []string{"integer", "string"} } + func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { if intOrPercent == nil { return &defaultValue @@ -141,7 +176,7 @@ func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrS // GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent. // This method returns a scaled value from an IntOrString type. If the IntOrString // is a percentage string value it's treated as a percentage and scaled appropriately -// in accordance to the total, if it's an int value it's treated as a a simple value and +// in accordance to the total, if it's an int value it's treated as a simple value and // if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error. 
func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { if intOrPercent == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml new file mode 100644 index 000000000..a667e9834 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml @@ -0,0 +1,7018 @@ +apiVersion: v1 +kind: Endpoints +metadata: + creationTimestamp: '2016-10-04T17:45:58Z' + labels: + app: my-app + name: app-server + namespace: default + resourceVersion: '184597135' + selfLink: /self/link + uid: 6826f086-8a5a-11e6-8d09-42010a800005 +subsets: +- addresses: + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0000 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0001 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0002 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0003 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0004 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0005 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0006 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0007 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0008 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0009 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0010 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0011 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0012 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0015 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0016 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0017 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0018 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0019 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0020 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0021 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0022 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0023 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0024 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0025 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0026 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0027 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0028 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0029 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0030 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0031 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0032 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0033 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0034 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0035 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0036 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0037 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0038 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0039 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0040 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0041 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0042 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0043 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0044 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0045 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0046 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0047 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0048 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0049 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0050 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0051 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0052 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0053 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0054 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0055 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0056 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0057 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0058 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0059 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0060 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0061 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0062 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0063 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0064 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0065 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0066 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0067 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0068 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0069 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0070 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0071 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0072 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0073 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0074 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0075 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0076 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0077 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0078 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0079 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0080 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0081 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0082 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0083 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0084 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0085 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0086 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0087 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0088 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0089 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0090 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0091 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0092 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0093 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0094 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0095 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0096 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0097 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0098 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0099 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0100 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0101 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0102 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0103 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0104 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0105 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0106 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0107 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0108 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0109 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0110 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0111 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0112 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0113 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0114 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0115 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0116 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0117 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0118 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0119 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0120 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0121 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0122 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0123 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0124 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0125 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0126 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0127 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0128 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0129 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0130 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0131 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0132 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0133 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0134 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0135 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0136 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0137 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0138 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0139 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0140 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0141 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0142 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0143 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0144 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0145 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0146 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0147 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0148 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0149 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0150 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0151 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0152 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0153 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0154 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0155 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0156 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0157 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0158 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0159 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0160 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0161 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0162 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0163 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0164 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0165 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0166 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0167 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0168 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0169 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0170 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0171 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0172 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0173 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0174 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0175 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0176 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0177 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0178 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0179 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0180 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0181 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0182 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0183 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0184 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0185 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0186 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0187 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0188 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0189 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0190 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0191 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0192 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0193 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0194 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0195 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0196 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0197 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0198 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0199 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0200 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0201 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0202 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0203 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0204 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0205 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0206 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0207 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0208 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0209 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0210 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0211 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0212 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0213 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0214 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0215 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0216 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0217 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0218 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0219 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0220 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0221 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0222 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0223 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0224 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0225 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0226 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0227 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0228 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0229 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0230 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0231 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0232 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0233 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0234 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0235 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0236 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0237 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0238 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0239 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0240 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0241 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0242 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0243 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0244 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0245 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0246 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0247 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0248 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0249 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0250 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0251 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0252 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0253 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0254 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0255 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0256 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0257 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0258 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0259 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0260 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0261 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0262 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0263 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0264 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0265 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0266 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0267 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0268 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0269 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0270 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0271 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0272 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0273 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0274 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0275 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0276 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0277 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0278 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0279 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0280 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0281 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0282 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0283 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0284 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0285 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0286 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0287 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0288 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0289 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0290 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0291 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0292 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0293 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0294 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0295 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0296 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0297 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0298 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0299 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0300 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0301 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0302 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0303 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0304 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0305 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0306 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0307 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0308 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0309 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0310 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0311 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0312 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0313 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0314 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0315 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0316 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0317 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0318 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0319 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0320 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0321 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0322 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0323 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0324 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0325 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0326 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0327 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0328 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0329 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0330 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0331 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0332 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0333 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0334 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0335 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0336 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0337 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0338 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0339 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0340 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0341 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0342 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0343 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0344 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0345 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0346 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0347 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0348 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0349 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0350 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0351 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0352 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0353 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0354 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0355 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0356 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0357 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0358 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0359 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0360 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0361 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0362 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0363 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0364 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0365 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0366 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0367 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0368 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0369 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0370 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0371 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0372 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0373 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0374 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0375 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0376 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0377 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0378 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0379 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0380 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0381 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0382 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0383 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0384 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0385 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0386 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0387 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0388 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0389 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0390 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0391 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0392 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0393 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0394 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0395 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0396 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0397 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0398 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0399 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0400 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0401 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0402 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0403 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0404 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0405 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0406 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0407 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0408 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0409 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0410 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0411 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0412 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0413 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0414 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0415 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0416 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0417 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0418 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0419 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0420 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0421 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0422 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0423 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0424 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0425 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0426 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0427 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0428 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0429 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0430 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0431 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0432 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0433 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0434 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0435 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0436 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0437 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0438 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0439 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0440 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0441 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0442 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0443 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0444 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0445 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0446 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0447 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0448 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0449 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0450 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0451 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0452 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0453 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0454 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0455 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0456 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0457 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0458 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0459 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0460 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0461 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0462 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0463 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0464 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0465 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0466 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0467 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0468 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0469 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0470 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0471 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0472 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0473 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0474 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0475 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0476 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0477 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0478 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0479 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0480 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0481 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0482 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0483 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0484 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0485 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0486 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0487 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0488 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0489 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0490 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0491 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0492 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0493 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0494 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0495 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0496 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0497 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0498 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0499 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0500 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0501 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0502 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0503 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0504 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0505 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0506 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0507 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0508 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0509 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0510 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0511 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0512 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0513 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0514 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0515 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0516 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0517 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0518 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0519 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0520 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0521 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0522 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0523 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0524 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0525 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0526 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0527 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0528 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0529 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0530 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0531 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0532 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0533 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0534 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0535 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0536 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0537 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0538 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0539 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0540 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0541 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0542 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0543 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0544 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0545 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0546 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0547 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0548 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0549 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0550 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0551 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0552 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0553 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0554 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0555 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0556 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0557 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0558 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0559 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0560 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0561 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0562 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0563 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0564 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0565 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0566 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0567 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0568 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0569 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0570 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0571 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0572 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0573 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0574 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0575 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0576 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0577 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0578 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0579 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0580 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0581 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0582 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0583 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0584 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0585 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0586 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0587 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0588 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0589 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0590 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0591 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0592 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0593 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0594 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0595 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0596 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0597 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0598 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0599 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0600 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0601 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0602 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0603 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0604 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0605 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0606 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0607 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0608 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0609 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0610 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0611 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0612 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0613 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0614 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0615 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0616 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0617 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0618 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0619 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0620 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0621 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0622 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0623 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0624 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0625 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0626 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0627 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0628 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0629 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0630 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0631 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0632 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0633 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0634 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0635 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0636 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0637 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0638 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0639 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0640 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0641 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0642 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0643 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0644 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0645 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0646 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0647 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0648 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0649 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0650 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0651 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0652 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0653 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0654 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0655 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0656 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0657 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0658 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0659 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0660 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0661 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0662 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0663 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0664 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0665 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0666 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0667 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0668 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0669 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0670 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0671 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0672 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0673 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0674 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0675 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0676 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0677 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0678 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0679 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0680 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0681 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0682 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0683 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0684 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0685 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0686 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0687 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0688 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0689 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0690 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0691 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0692 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0693 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0694 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0695 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0696 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0697 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0698 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0699 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0700 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0701 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0702 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0703 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0704 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0705 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0706 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0707 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0708 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0709 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0710 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0711 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0712 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0713 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0714 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0715 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0716 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0717 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0718 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0719 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0720 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0721 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0722 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0723 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0724 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0725 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0726 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0727 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0728 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0729 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0730 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0731 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0732 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0733 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0734 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0735 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0736 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0737 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0738 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0739 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0740 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0741 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0742 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0743 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0744 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0745 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0746 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0747 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0748 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0749 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0750 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0751 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0752 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0753 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0754 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0755 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0756 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0757 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0758 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0759 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0760 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0761 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0762 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0763 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0764 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0765 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0766 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0767 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0768 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0769 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0770 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0771 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0772 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0773 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0774 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0775 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0776 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0777 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0778 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0779 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0780 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0781 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0782 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0783 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0784 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0785 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0786 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0787 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0788 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0789 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0790 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0791 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0792 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0793 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0794 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0795 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0796 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0797 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0798 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0799 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0800 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0801 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0802 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0803 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0804 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0805 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0806 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0807 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0808 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0809 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0810 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0811 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0812 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0813 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0814 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0815 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0816 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0817 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0818 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0819 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0820 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0821 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0822 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0823 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0824 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0825 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0826 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0827 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0828 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0829 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0830 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0831 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0832 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0833 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0834 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0835 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0836 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0837 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0838 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0839 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0840 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0841 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0842 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0843 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0844 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0845 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0846 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0847 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0848 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0849 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0850 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0851 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0852 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0853 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0854 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0855 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0856 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0857 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0858 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0859 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0860 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0861 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0862 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0863 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0864 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0865 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0866 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0867 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0868 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0869 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0870 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0871 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0872 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0873 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0874 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0875 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0876 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0877 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0878 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0879 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0880 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0881 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0882 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0883 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0884 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0885 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0886 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0887 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0888 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0889 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0890 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0891 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0892 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0893 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0894 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0895 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0896 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0897 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0898 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0899 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0900 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0901 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0902 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0903 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0904 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0905 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0906 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0907 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0908 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0909 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0910 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0911 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0912 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0913 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0914 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0915 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0916 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0917 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0918 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0919 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0920 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0921 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0922 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0923 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0924 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0925 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0926 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0927 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0928 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0929 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0930 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0931 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0932 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0933 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0934 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0935 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0936 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0937 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0938 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0939 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0940 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0941 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0942 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0943 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0944 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0945 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0946 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0947 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0948 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0949 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0950 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0951 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0952 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0953 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0954 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0955 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0956 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0957 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0958 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0959 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0960 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0961 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0962 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0963 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0964 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0965 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0966 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0967 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0968 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0969 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0970 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0971 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0972 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0973 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0974 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0975 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0976 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0994 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0998 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP +
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
index 792badbc3..d2ce66c1b 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
@@ -45,7 +45,7 @@ import (
 // and their field paths and types are exactly the same, then ExtractInto can be
 // called with the root resource as the object and the subresource as the
 // applyConfiguration. This works for "status", obviously, because status is
-// represented by the exact same object as the root resource. This this does NOT
+// represented by the exact same object as the root resource. This does NOT
 // work, for example, with the "scale" subresources of Deployment, ReplicaSet and
 // StatefulSet. While the spec.replicas, status.replicas fields are in the same
 // exact field path locations as they are in autoscaling.Scale, the selector
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
new file mode 100644
index 000000000..978ffb3c3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package managedfields
+
+import (
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/util/managedfields/internal"
+    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+)
+
+// FieldManager updates the managed fields and merges applied
+// configurations.
+type FieldManager = internal.FieldManager
+
+// NewDefaultFieldManager creates a new FieldManager that merges apply requests
+// and update managed fields for other types of requests.
+func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) {
+    f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create field manager: %v", err)
+    }
+    return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
+}
+
+// NewDefaultCRDFieldManager creates a new FieldManager specifically for
+// CRDs.
+// This allows for the possibility of fields which are not defined
+// in models, as well as having no models defined at all.
+func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
+    f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create field manager: %v", err)
+    }
+    return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
+}
+
+func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error {
+    _, err := internal.DecodeManagedFields(encodedManagedFields)
+    return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go
index 0cc228af9..408739c50 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/gvkparser.go
@@ -22,6 +22,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/kube-openapi/pkg/schemaconv"
     "k8s.io/kube-openapi/pkg/util/proto"
+    smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
     "sigs.k8s.io/structured-merge-diff/v4/typed"
 )
 
@@ -58,7 +59,7 @@ func NewGVKParser(models proto.Models, preserveUnknownFields bool) (*GvkParser,
     parser := GvkParser{
         gvks: map[schema.GroupVersionKind]string{},
     }
-    parser.parser = typed.Parser{Schema: *typeSchema}
+    parser.parser = typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}}
     for _, modelName := range models.ListModels() {
         model := models.LookupModel(modelName)
         if model == nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go
new file mode 100644
index 000000000..b75ef7416
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "sync"
+    "time"
+)
+
+// AtMostEvery will never run the method more than once every specified
+// duration.
+type AtMostEvery struct {
+    delay    time.Duration
+    lastCall time.Time
+    mutex    sync.Mutex
+}
+
+// NewAtMostEvery creates a new AtMostEvery, that will run the method at
+// most every given duration.
+func NewAtMostEvery(delay time.Duration) *AtMostEvery {
+    return &AtMostEvery{
+        delay: delay,
+    }
+}
+
+// updateLastCall returns true if the lastCall time has been updated,
+// false if it was too early.
+func (s *AtMostEvery) updateLastCall() bool {
+    s.mutex.Lock()
+    defer s.mutex.Unlock()
+    if time.Since(s.lastCall) < s.delay {
+        return false
+    }
+    s.lastCall = time.Now()
+    return true
+}
+
+// Do will run the method if enough time has passed, and return true.
+// Otherwise, it does nothing and returns false.
+func (s *AtMostEvery) Do(fn func()) bool {
+    if !s.updateLastCall() {
+        return false
+    }
+    fn()
+    return true
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go
new file mode 100644
index 000000000..fa342ca13
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type buildManagerInfoManager struct {
+    fieldManager Manager
+    groupVersion schema.GroupVersion
+    subresource  string
+}
+
+var _ Manager = &buildManagerInfoManager{}
+
+// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier
+// combining operation and version for update requests, and just operation for apply requests.
+func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion, subresource string) Manager {
+    return &buildManagerInfoManager{
+        fieldManager: f,
+        groupVersion: gv,
+        subresource:  subresource,
+    }
+}
+
+// Update implements Manager.
+func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
+    manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate)
+    if err != nil {
+        return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
+    }
+    return f.fieldManager.Update(liveObj, newObj, managed, manager)
+}
+
+// Apply implements Manager.
+func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
+    manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply)
+    if err != nil {
+        return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
+    }
+    return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
+}
+
+func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) {
+    managerInfo := metav1.ManagedFieldsEntry{
+        Manager:     prefix,
+        Operation:   operation,
+        APIVersion:  f.groupVersion.String(),
+        Subresource: f.subresource,
+    }
+    if managerInfo.Manager == "" {
+        managerInfo.Manager = "unknown"
+    }
+    return BuildManagerIdentifier(&managerInfo)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go
new file mode 100644
index 000000000..8951932ba
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "fmt"
+    "sort"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+)
+
+type capManagersManager struct {
+    fieldManager          Manager
+    maxUpdateManagers     int
+    oldUpdatesManagerName string
+}
+
+var _ Manager = &capManagersManager{}
+
+// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates
+// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update.
+func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager {
+    return &capManagersManager{
+        fieldManager:          fieldManager,
+        maxUpdateManagers:     maxUpdateManagers,
+        oldUpdatesManagerName: "ancient-changes",
+    }
+}
+
+// Update implements Manager.
+func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
+    object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager)
+    if err != nil {
+        return object, managed, err
+    }
+    if managed, err = f.capUpdateManagers(managed); err != nil {
+        return nil, nil, fmt.Errorf("failed to cap update managers: %v", err)
+    }
+    return object, managed, nil
+}
+
+// Apply implements Manager.
+func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
+    return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
+}
+
+// capUpdateManagers merges a number of the oldest update entries into versioned buckets,
+// such that the number of entries from updates does not exceed f.maxUpdateManagers.
+func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) {
+	// Gather all entries from updates
+	updaters := []string{}
+	for manager, fields := range managed.Fields() {
+		if !fields.Applied() {
+			updaters = append(updaters, manager)
+		}
+	}
+	if len(updaters) <= f.maxUpdateManagers {
+		return managed, nil
+	}
+
+	// If we have more than the maximum, sort the update entries by time, oldest first.
+	sort.Slice(updaters, func(i, j int) bool {
+		iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0)
+		if iTime != nil {
+			iSeconds = iTime.Unix()
+		}
+		if jTime != nil {
+			jSeconds = jTime.Unix()
+		}
+		if iSeconds != jSeconds {
+			return iSeconds < jSeconds
+		}
+		return updaters[i] < updaters[j]
+	})
+
+	// Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap
+	versionToFirstManager := map[string]string{}
+	for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ {
+		manager := updaters[i]
+		vs := managed.Fields()[manager]
+		time := managed.Times()[manager]
+		version := string(vs.APIVersion())
+
+		// Create a new manager identifier for the versioned bucket entry.
+		// The version for this manager comes from the version of the update being merged into the bucket.
+		bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{
+			Manager:    f.oldUpdatesManagerName,
+			Operation:  metav1.ManagedFieldsOperationUpdate,
+			APIVersion: version,
+		})
+		if err != nil {
+			return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err)
+		}
+
+		// Merge the fieldsets if this is not the first time the version was seen.
+		// Otherwise just record the manager name in versionToFirstManager
+		if first, ok := versionToFirstManager[version]; ok {
+			// If the bucket doesn't exist yet, create one.
+			if _, ok := managed.Fields()[bucket]; !ok {
+				s := managed.Fields()[first]
+				delete(managed.Fields(), first)
+				managed.Fields()[bucket] = s
+			}
+
+			managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied())
+			delete(managed.Fields(), manager)
+			length--
+
+			// Use the time from the update being merged into the bucket, since it is more recent.
+			managed.Times()[bucket] = time
+		} else {
+			versionToFirstManager[version] = manager
+		}
+	}
+
+	return managed, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go
new file mode 100644
index 000000000..8c044c915
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// NewConflictError returns an error including details on the requests apply conflicts +func NewConflictError(conflicts merge.Conflicts) *errors.StatusError { + causes := []metav1.StatusCause{} + for _, conflict := range conflicts { + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseTypeFieldManagerConflict, + Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)), + Field: conflict.Path.String(), + }) + } + return errors.NewApplyConflict(causes, getConflictMessage(conflicts)) +} + +func getConflictMessage(conflicts merge.Conflicts) string { + if len(conflicts) == 1 { + return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path) + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + uniqueManagers := []string{} + for manager := range m { + uniqueManagers = append(uniqueManagers, manager) + } + + // Print conflicts by sorted managers. + sort.Strings(uniqueManagers) + + messages := []string{} + for _, manager := range uniqueManagers { + messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager))) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n")) +} + +func printManager(manager string) string { + encodedManager := &metav1.ManagedFieldsEntry{} + if err := json.Unmarshal([]byte(manager), encodedManager); err != nil { + return fmt.Sprintf("%q", manager) + } + managerStr := fmt.Sprintf("%q", encodedManager.Manager) + if encodedManager.Subresource != "" { + managerStr = fmt.Sprintf("%s with subresource %q", managerStr, encodedManager.Subresource) + } + if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate { + if encodedManager.Time == nil { + return fmt.Sprintf("%s using %v", managerStr, encodedManager.APIVersion) + } + return fmt.Sprintf("%s using %v at %v", managerStr, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339)) + } + return managerStr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go new file mode 100644 index 000000000..eca04a711 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go @@ -0,0 +1,209 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates +// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum. +// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries. +const DefaultMaxUpdateManagers int = 10 + +// DefaultTrackOnCreateProbability defines the default probability that the field management of an object +// starts being tracked from the object's creation, instead of from the first time the object is applied to. +const DefaultTrackOnCreateProbability float32 = 1 + +var atMostEverySecond = NewAtMostEvery(time.Second) + +// FieldManager updates the managed fields and merges applied +// configurations. +type FieldManager struct { + fieldManager Manager + subresource string +} + +// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields +// on update and apply requests. +func NewFieldManager(f Manager, subresource string) *FieldManager { + return &FieldManager{fieldManager: f, subresource: subresource} +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { + return NewFieldManager( + NewVersionCheckManager( + NewLastAppliedUpdater( + NewLastAppliedManager( + NewProbabilisticSkipNonAppliedManager( + NewCapManagersManager( + NewBuildManagerInfoManager( + NewManagedFieldsUpdater( + NewStripMetaManager(f), + ), kind.GroupVersion(), subresource, + ), DefaultMaxUpdateManagers, + ), objectCreater, DefaultTrackOnCreateProbability, + ), typeConverter, objectConverter, kind.GroupVersion(), + ), + ), kind, + ), subresource, + ) +} + +func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. + if ignoreManagedFieldsFromRequestObject { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return NewEmptyManaged(), nil + } + + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. 
+ managed, err := DecodeManagedFields(newAccessor.GetManagedFields()) + if err != nil || len(managed.Fields()) == 0 { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + return managed, nil +} + +func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) { + if err != nil { + return NewEmptyManaged(), nil + } + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + isSubresource := f.subresource != "" + managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource) + if err != nil { + return newObj, nil + } + + RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. +func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + ns, name := "unknown", "unknown" + if accessor, err := meta.Accessor(newObj); err == nil { + ns = accessor.GetNamespace() + name = accessor.GetName() + } + + klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind", + newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. 
+ managed, err := DecodeManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, NewConflictError(conflicts) + } + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go new file mode 100644 index 000000000..08186191a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// EmptyFields represents a set with no paths +// It looks like metav1.Fields{Raw: []byte("{}")} +var EmptyFields = func() metav1.FieldsV1 { + f, err := SetToFields(*fieldpath.NewSet()) + if err != nil { + panic("should never happen") + } + return f +}() + +// FieldsToSet creates a set paths from an input trie of fields +func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) { + err = s.FromJSON(bytes.NewReader(f.Raw)) + return s, err +} + +// SetToFields creates a trie of fields from an input set of paths +func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) { + f.Raw, err = s.ToJSON() + return f, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go new file mode 100644 index 000000000..b00b6b829 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" +) + +// LastAppliedConfigAnnotation is the annotation used to store the previous +// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. 
+//
+// This is a copy of the corev1 annotation since we don't want to depend on the whole package.
+const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration"
+
+// SetLastApplied sets the last-applied annotation to the given value in
+// the object.
+func SetLastApplied(obj runtime.Object, value string) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		panic(fmt.Sprintf("couldn't get accessor: %v", err))
+	}
+	var annotations = accessor.GetAnnotations()
+	if annotations == nil {
+		annotations = map[string]string{}
+	}
+	annotations[LastAppliedConfigAnnotation] = value
+	if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil {
+		delete(annotations, LastAppliedConfigAnnotation)
+	}
+	accessor.SetAnnotations(annotations)
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go
new file mode 100644
index 000000000..3f6cf8821
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+	"sigs.k8s.io/structured-merge-diff/v4/merge"
+)
+
+type lastAppliedManager struct {
+	fieldManager    Manager
+	typeConverter   TypeConverter
+	objectConverter runtime.ObjectConvertor
+	groupVersion    schema.GroupVersion
+}
+
+var _ Manager = &lastAppliedManager{}
+
+// NewLastAppliedManager converts the client-side apply annotation to
+// server-side apply managed fields
+func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager {
+	return &lastAppliedManager{
+		fieldManager:    fieldManager,
+		typeConverter:   typeConverter,
+		objectConverter: objectConverter,
+		groupVersion:    groupVersion,
+	}
+}
+
+// Update implements Manager.
+func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
+	return f.fieldManager.Update(liveObj, newObj, managed, manager)
+}
+
+// Apply will consider the last-applied annotation
+// for upgrading an object managed by client-side apply to server-side apply
+// without conflicts.
+func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
+	newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force)
+	// Upgrade the client-side apply annotation only from kubectl server-side-apply.
+	// To opt-out of this behavior, users may specify a different field manager.
+ if manager != "kubectl" { + return newLiveObj, newManaged, newErr + } + + // Check if we have conflicts + if newErr == nil { + return newLiveObj, newManaged, newErr + } + conflicts, ok := newErr.(merge.Conflicts) + if !ok { + return newLiveObj, newManaged, newErr + } + conflictSet := conflictsToSet(conflicts) + + // Check if conflicts are allowed due to client-side apply, + // and if so, then force apply + allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj) + if err != nil { + return newLiveObj, newManaged, newErr + } + if !conflictSet.Difference(allowedConflictSet).Empty() { + newConflicts := conflictsDifference(conflicts, allowedConflictSet) + return newLiveObj, newManaged, newConflicts + } + + return f.fieldManager.Apply(liveObj, newObj, managed, manager, true) +} + +func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) { + var accessor, err = meta.Accessor(liveObj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // If there is no client-side apply annotation, then there is nothing to do + var annotations = accessor.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("no last applied annotation") + } + var lastApplied, ok = annotations[LastAppliedConfigAnnotation] + if !ok || lastApplied == "" { + return nil, fmt.Errorf("no last applied annotation") + } + + liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to typed: %v", err) + } + + var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}} + err = json.Unmarshal([]byte(lastApplied), lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied) + } + + if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() { + return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err) + } + + lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to convert last applied to typed: %v", err) + } + + lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err) + } + + comparison, err := lastAppliedObjTyped.Compare(liveObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err) + } + + // Remove fields in last applied that are different, added, or missing in + // the live object. + // Because last-applied fields don't match the live object fields, + // then we don't own these fields. + lastAppliedObjFieldSet = lastAppliedObjFieldSet. + Difference(comparison.Modified). + Difference(comparison.Added). 
+ Difference(comparison.Removed) + + return lastAppliedObjFieldSet, nil +} + +// TODO: replace with merge.Conflicts.ToSet() +func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set { + conflictSet := fieldpath.NewSet() + for _, conflict := range []merge.Conflict(conflicts) { + conflictSet.Insert(conflict.Path) + } + return conflictSet +} + +func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts { + newConflicts := []merge.Conflict{} + for _, conflict := range []merge.Conflict(conflicts) { + if !s.Has(conflict.Path) { + newConflicts = append(newConflicts, conflict) + } + } + return newConflicts +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go new file mode 100644 index 000000000..06e6c5d8c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type lastAppliedUpdater struct { + fieldManager Manager +} + +var _ Manager = &lastAppliedUpdater{} + +// NewLastAppliedUpdater sets the client-side apply annotation up to date with +// server-side apply managed fields +func NewLastAppliedUpdater(fieldManager Manager) Manager { + return &lastAppliedUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// server-side apply managed fields +func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + if err != nil { + return liveObj, managed, err + } + + // Sync the client-side apply annotation only from kubectl server-side apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ // + // If the client-side apply annotation doesn't exist, + // then continue because we have no annotation to update + if manager == "kubectl" && hasLastApplied(liveObj) { + lastAppliedValue, err := buildLastApplied(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) + } + err = SetLastApplied(liveObj, lastAppliedValue) + if err != nil { + return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) + } + } + return liveObj, managed, err +} + +func hasLastApplied(obj runtime.Object) bool { + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + return false + } + lastApplied, ok := annotations[LastAppliedConfigAnnotation] + return ok && len(lastApplied) > 0 +} + +func buildLastApplied(obj runtime.Object) (string, error) { + obj = obj.DeepCopyObject() + + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // Remove the annotation from the object before encoding the object + var annotations = accessor.GetAnnotations() + delete(annotations, LastAppliedConfigAnnotation) + accessor.SetAnnotations(annotations) + + lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err) + } + return string(lastApplied), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go new file mode 100644 index 000000000..9b4c20326 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. +func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewEmptyManaged creates an empty ManagedInterface. 
+func NewEmptyManaged() ManagedInterface { + return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{}) +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } +} + +// RemoveObjectManagedFields removes the ManagedFields from the object +// before we merge so that it doesn't appear in the ManagedFields +// recursively. +func RemoveObjectManagedFields(obj runtime.Object) { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + accessor.SetManagedFields(nil) +} + +// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + encodedManagedFields, err := encodeManagedFields(managed) + if err != nil { + return fmt.Errorf("failed to convert back managed fields to API: %v", err) + } + accessor.SetManagedFields(encodedManagedFields) + + return nil +} + +// DecodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + managed := managedStruct{} + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + return nil, fmt.Errorf("operation must be `Apply` or `Update`") + } + if len(encodedVersionedSet.APIVersion) < 1 { + return nil, fmt.Errorf("apiVersion must not be empty") + } + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. + case "": + return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } + manager, err := BuildManagerIdentifier(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + } + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + } + managed.times[manager] = encodedVersionedSet.Time + } + return &managed, nil +} + +// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry +func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) { + encodedManagerCopy := *encodedManager + + // Never include fields type in the manager identifier + encodedManagerCopy.FieldsType = "" + + // Never include the fields in the manager identifier + encodedManagerCopy.FieldsV1 = nil + + // Never include the time in the manager identifier + encodedManagerCopy.Time = nil + + // For appliers, don't include the APIVersion in the manager identifier, + // so it will always have the same manager identifier each time it applied. 
+ if encodedManager.Operation == metav1.ManagedFieldsOperationApply { + encodedManagerCopy.APIVersion = "" + } + + // Use the remaining fields to build the manager identifier + b, err := json.Marshal(&encodedManagerCopy) + if err != nil { + return "", fmt.Errorf("error marshalling manager identifier: %v", err) + } + + return string(b), nil +} + +func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) { + fields := EmptyFields + if encodedVersionedSet.FieldsV1 != nil { + fields = *encodedVersionedSet.FieldsV1 + } + set, err := FieldsToSet(fields) + if err != nil { + return nil, fmt.Errorf("error decoding set: %v", err) + } + return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil +} + +// encodeManagedFields converts ManagedFields from the format used by +// sigs.k8s.io/structured-merge-diff to the wire format (api format) +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } + encodedManagedFields = []metav1.ManagedFieldsEntry{} + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] + v, err := encodeManagerVersionedSet(manager, versionedSet) + if err != nil { + return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) + } + if t, ok := managed.Times()[manager]; ok { + v.Time = t + } + encodedManagedFields = append(encodedManagedFields, *v) + } + return sortEncodedManagedFields(encodedManagedFields) +} + +func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) { + sort.Slice(encodedManagedFields, func(i, j int) bool { + p, q := encodedManagedFields[i], encodedManagedFields[j] + + if p.Operation != q.Operation { + return p.Operation < q.Operation + } + + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() + } + if q.Time != nil { + qSeconds = q.Time.Unix() + } + if pSeconds != qSeconds { + return pSeconds < qSeconds + } + + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + + if p.APIVersion != q.APIVersion { + return p.APIVersion < q.APIVersion + } + return p.Subresource < q.Subresource + }) + + return encodedManagedFields, nil +} + +func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) { + encodedVersionedSet = &metav1.ManagedFieldsEntry{} + + // Get as many fields as we can from the manager identifier + err = json.Unmarshal([]byte(manager), encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err) + } + + // Get the APIVersion, Operation, and Fields from the VersionedSet + encodedVersionedSet.APIVersion = string(versionedSet.APIVersion()) + if versionedSet.Applied() { + encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply + } + encodedVersionedSet.FieldsType = "FieldsV1" + fields, err := SetToFields(*versionedSet.Set()) + if err != nil { + return nil, fmt.Errorf("error encoding set: %v", err) + } + encodedVersionedSet.FieldsV1 = &fields + + return encodedVersionedSet, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go 
b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go new file mode 100644 index 000000000..376eed6b2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type managedFieldsUpdater struct { + fieldManager Manager +} + +var _ Manager = &managedFieldsUpdater{} + +// NewManagedFieldsUpdater is responsible for updating the managedfields +// in the object, updating the time of the operation as necessary. For +// updates, it uses a hard-coded manager to detect if things have +// changed, and swaps back the correct manager after the operation is +// done. +func NewManagedFieldsUpdater(fieldManager Manager) Manager { + return &managedFieldsUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + self := "current-operation" + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self) + if err != nil { + return object, managed, err + } + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + } + + return object, managed, nil +} + +// Apply implements Manager. +func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) + if err != nil { + return object, managed, err + } + if object != nil { + managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} + } else { + object = liveObj.DeepCopyObject() + RemoveObjectManagedFields(object) + } + return object, managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go new file mode 100644 index 000000000..053936103 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go new file mode 100644 index 000000000..1954d65d3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +const ( + // Field indicates that the content of this path element is a field's name + Field = "f" + + // Value indicates that the content of this path element is a field's value + Value = "v" + + // Index indicates that the content of this path element is an index in an array + Index = "i" + + // Key indicates that the content of this path element is a key value map + Key = "k" + + // Separator separates the type of a path element from the contents + Separator = ":" +) + +// NewPathElement parses a serialized path element +func NewPathElement(s string) (fieldpath.PathElement, error) { + split := strings.SplitN(s, Separator, 2) + if len(split) < 2 { + return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch split[0] { + case Field: + return fieldpath.PathElement{ + FieldName: &split[1], + }, nil + case Value: + val, err := value.FromJSON([]byte(split[1])) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Value: &val, + }, nil + case Index: + i, err := strconv.Atoi(split[1]) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Index: &i, + }, nil + case Key: + kv := map[string]json.RawMessage{} + err := json.Unmarshal([]byte(split[1]), &kv) + if err != nil { + return fieldpath.PathElement{}, err + } + fields := value.FieldList{} + for k, v := range kv { + b, err := json.Marshal(v) + if err != nil { + return fieldpath.PathElement{}, err + } + val, err := value.FromJSON(b) + if err != nil { + return fieldpath.PathElement{}, err + } + + fields = append(fields, value.Field{ + Name: k, + Value: val, + }) + } + return fieldpath.PathElement{ + Key: &fields, + }, nil + default: + // Ignore unknown key types + return fieldpath.PathElement{}, nil + } +} + +// PathElementString serializes a path element +func PathElementString(pe fieldpath.PathElement) (string, error) { + switch { + case pe.FieldName != nil: + return Field + Separator + *pe.FieldName, nil + case pe.Key != nil: + kv := map[string]json.RawMessage{} + for _, k := range *pe.Key { + b, err := value.ToJSON(k.Value) + if err != nil { + return "", err + } + m := json.RawMessage{} + err = json.Unmarshal(b, &m) + if err != nil { + return "", err + } + kv[k.Name] = m + } + b, err := json.Marshal(kv) + if err != nil { + return "", err + } + return Key + ":" + string(b), nil + case pe.Value != nil: + b, err := value.ToJSON(*pe.Value) + if err != nil { + return "", err + } + return Value + ":" + string(b), nil + case pe.Index != nil: + return Index + ":" + strconv.Itoa(*pe.Index), nil + default: + return "", errors.New("Invalid type of path element") + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go new file mode 100644 index 000000000..f24c040ed --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "math/rand" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + beforeApplyManagerName string + probability float32 +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, 0.0) +} + +// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, +// or starts tracking on create with p probability. +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, p float32) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + beforeApplyManagerName: "before-first-apply", + probability: p, + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + accessor, err := meta.Accessor(liveObj) + if err != nil { + return newObj, managed, nil + } + + // If managed fields is empty, we need to determine whether to skip tracking managed fields. + if len(managed.Fields()) == 0 { + // Check if the operation is a create, by checking whether lastObj's UID is empty. + // If the operation is create, P(tracking managed fields) = f.probability + // If the operation is update, skip tracking managed fields, since we already know managed fields is empty. + if len(accessor.GetUID()) == 0 { + if f.probability <= rand.Float32() { + return newObj, managed, nil + } + } else { + return newObj, managed, nil + } + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + gvk := appliedObj.GetObjectKind().GroupVersionKind() + emptyObj, err := f.objectCreater.New(gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go new file mode 100644 index 000000000..9b61f3a6f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type stripMetaManager struct { + fieldManager Manager + + // stripSet is the list of fields that should never be part of a mangedFields. + stripSet *fieldpath.Set +} + +var _ Manager = &stripMetaManager{} + +// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset. +func NewStripMetaManager(fieldManager Manager) Manager { + return &stripMetaManager{ + fieldManager: fieldManager, + stripSet: fieldpath.NewSet( + fieldpath.MakePathOrDie("apiVersion"), + fieldpath.MakePathOrDie("kind"), + fieldpath.MakePathOrDie("metadata"), + fieldpath.MakePathOrDie("metadata", "name"), + fieldpath.MakePathOrDie("metadata", "namespace"), + fieldpath.MakePathOrDie("metadata", "creationTimestamp"), + fieldpath.MakePathOrDie("metadata", "selfLink"), + fieldpath.MakePathOrDie("metadata", "uid"), + fieldpath.MakePathOrDie("metadata", "clusterName"), + fieldpath.MakePathOrDie("metadata", "generation"), + fieldpath.MakePathOrDie("metadata", "managedFields"), + fieldpath.MakePathOrDie("metadata", "resourceVersion"), + ), + } +} + +// Update implements Manager. +func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + return nil, nil, err + } + f.stripFields(managed.Fields(), manager) + return newObj, managed, nil +} + +// stripFields removes a predefined set of paths found in typed from managed +func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) { + vs, ok := managed[manager] + if ok { + if vs == nil { + panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager)) + } + newSet := vs.Set().Difference(f.stripSet) + if newSet.Empty() { + delete(managed, manager) + } else { + managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied()) + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go new file mode 100644 index 000000000..786ad991c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -0,0 +1,189 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +type structuredMergeManager struct { + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { + if typeConverter == nil { + return nil, fmt.Errorf("typeconverter must not be nil") + } + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + IgnoredFields: resetFields, + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + IgnoredFields: resetFields, + }, + }, nil +} + +func objectGVKNN(obj runtime.Object) string { + name := "" + namespace := "" + if accessor, err := meta.Accessor(obj); err == nil { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + + return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind()) +} + +// Update implements Manager. 
+func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned, typed.AllowDuplicates) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err) + } + managed = NewManaged(managedFields, managed.Times()) + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + // Check that the patch object has the same version as the live object + if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. "+ + "Specified patch version: %s, expected: %s", + patchVersion, f.groupVersion)) + } + + patchObjMeta, err := meta.Accessor(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("couldn't get accessor: %v", err) + } + if patchObjMeta.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest("metadata.managedFields must be nil") + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + + // Don't allow duplicates in the applied object. 
+ patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + return nil, nil, err + } + managed = NewManaged(managedFields, managed.Times()) + + if newObjTyped == nil { + return nil, managed, nil + } + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version: %v", objectGVKNN(patchObj), err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go new file mode 100644 index 000000000..c6449467c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/validation/spec" + smdschema "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. 
+type TypeConverter interface { + ObjectToTyped(runtime.Object, ...typed.ValidationOptions) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +type typeConverter struct { + parser map[schema.GroupVersionKind]*typed.ParseableType +} + +var _ TypeConverter = &typeConverter{} + +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + typeSchema, err := schemaconv.ToSchemaFromOpenAPI(openapiSpec, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to convert models to schema: %v", err) + } + + typeParser := typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}} + tr := indexModels(&typeParser, openapiSpec) + + return &typeConverter{parser: tr}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser[gvk] + if t == nil { + return nil, NewNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return t.FromStructured(obj, opts...) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type deducedTypeConverter struct{} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return deducedTypeConverter{} +} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return typed.DeducedParseableType.FromStructured(obj, opts...) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. +func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func indexModels( + typeParser *typed.Parser, + openAPISchemas map[string]*spec.Schema, +) map[schema.GroupVersionKind]*typed.ParseableType { + tr := map[schema.GroupVersionKind]*typed.ParseableType{} + for modelName, model := range openAPISchemas { + gvkList := parseGroupVersionKind(model.Extensions) + if len(gvkList) == 0 { + continue + } + + parsedType := typeParser.Type(modelName) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + tr[schema.GroupVersionKind(gvk)] = &parsedType + } + } + } + return tr +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. 
+func parseGroupVersionKind(extensions map[string]interface{}) []schema.GroupVersionKind { + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions["x-kubernetes-group-version-kind"] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + var group, version, kind string + + // gvk extension list must be a map with group, version, and + // kind fields + if gvkMap, ok := gvk.(map[interface{}]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + + } else if gvkMap, ok := gvk.(map[string]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + } else { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go new file mode 100644 index 000000000..ee1e2bca7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type versionCheckManager struct { + fieldManager Manager + gvk schema.GroupVersionKind +} + +var _ Manager = &versionCheckManager{} + +// NewVersionCheckManager creates a manager that makes sure that the +// applied object is in the proper version. +func NewVersionCheckManager(fieldManager Manager, gvk schema.GroupVersionKind) Manager { + return &versionCheckManager{fieldManager: fieldManager, gvk: gvk} +} + +// Update implements Manager. +func (f *versionCheckManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + // Nothing to do for updates, this is checked in many other places. + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. 
+func (f *versionCheckManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if gvk := appliedObj.GetObjectKind().GroupVersionKind(); gvk != f.gvk { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid object type: %v", gvk)) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go new file mode 100644 index 000000000..45855fa4c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// versionConverter is an implementation of +// sigs.k8s.io/structured-merge-diff/merge.Converter +type versionConverter struct { + typeConverter TypeConverter + objectConvertor runtime.ObjectConvertor + hubGetter func(from schema.GroupVersion) schema.GroupVersion +} + +var _ merge.Converter = &versionConverter{} + +// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor. +func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return schema.GroupVersion{ + Group: from.Group, + Version: h.Version, + } + }, + } +} + +// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor. +func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return h + }, + } +} + +// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter +func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) { + // Convert the smd typed value to a kubernetes object. + objectToConvert, err := v.typeConverter.TypedToObject(object) + if err != nil { + return object, err + } + + // Parse the target groupVersion. + groupVersion, err := schema.ParseGroupVersion(string(version)) + if err != nil { + return object, err + } + + // If attempting to convert to the same version as we already have, just return it. 
+ fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion() + if fromVersion == groupVersion { + return object, nil + } + + // Convert to internal + internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion)) + if err != nil { + return object, err + } + + // Convert the object into the target version + convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion) + if err != nil { + return object, err + } + + // Convert the object back to a smd typed value and return it. + return v.typeConverter.ObjectToTyped(convertedObject) +} + +// IsMissingVersionError +func (v *versionConverter) IsMissingVersionError(err error) bool { + return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) +} + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml new file mode 100644 index 000000000..a7f2d54fd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml @@ -0,0 +1,261 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + topology.kubernetes.io/region: us-central1 + topology.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: 
UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + message: kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - registry.k8s.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - registry.k8s.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - 
names: + - registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - registry.k8s.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - registry.k8s.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - registry.k8s.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - registry.k8s.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - registry.k8s.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - registry.k8s.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - registry.k8s.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - registry.k8s.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - registry.k8s.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + - registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - registry.k8s.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - registry.k8s.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - registry.k8s.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: 
/dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml new file mode 100644 index 000000000..3fb0877d6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: + - args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + 
lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go new file mode 100644 index 000000000..48b774cec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go @@ -0,0 +1,174 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +var ( + scaleGroupVersion = schema.GroupVersion{Group: "autoscaling", Version: "v1"} + replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas") +) + +// ResourcePathMappings maps a group/version to its replicas path. The +// assumption is that all the paths correspond to leaf fields. +type ResourcePathMappings map[string]fieldpath.Path + +// ScaleHandler manages the conversion of managed fields between a main +// resource and the scale subresource +type ScaleHandler struct { + parentEntries []metav1.ManagedFieldsEntry + groupVersion schema.GroupVersion + mappings ResourcePathMappings +} + +// NewScaleHandler creates a new ScaleHandler +func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { + return &ScaleHandler{ + parentEntries: parentEntries, + groupVersion: groupVersion, + mappings: mappings, + } +} + +// ToSubresource filter the managed fields of the main resource and convert +// them so that they can be handled by scale. +// For the managed fields that have a replicas path it performs two changes: +// 1. APIVersion is changed to the APIVersion of the scale subresource +// 2. 
Replicas path of the main resource is transformed to the replicas path of +// the scale subresource +func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { + managed, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + for manager, versionedSet := range managed.Fields() { + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Skip the entry if the APIVersion is unknown + if !ok || path == nil { + continue + } + + if versionedSet.Set().Has(path) { + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(replicasPathInScale), + fieldpath.APIVersion(scaleGroupVersion.String()), + versionedSet.Applied(), + ) + + f[manager] = newVersionedSet + t[manager] = managed.Times()[manager] + } + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +// ToParent merges `scaleEntries` with the entries of the main resource and +// transforms them accordingly +func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) { + decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + parentFields := decodedParentEntries.Fields() + + decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries) + if err != nil { + return nil, err + } + scaleFields := decodedScaleEntries.Fields() + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + + for manager, versionedSet := range parentFields { + // Get the main resource "replicas" path + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Drop the entry if the APIVersion is unknown. + if !ok { + continue + } + + // If the parent entry does not have the replicas path or it is nil, just + // keep it as it is. The path is nil for Custom Resources without scale + // subresource. + if path == nil || !versionedSet.Set().Has(path) { + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + continue + } + + if _, ok := scaleFields[manager]; !ok { + // "Steal" the replicas path from the main resource entry + newSet := versionedSet.Set().Difference(fieldpath.NewSet(path)) + + if !newSet.Empty() { + newVersionedSet := fieldpath.NewVersionedSet( + newSet, + versionedSet.APIVersion(), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + } else { + // Field wasn't stolen, let's keep the entry as it is. 
+ f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + delete(scaleFields, manager) + } + } + + for manager, versionedSet := range scaleFields { + if !versionedSet.Set().Has(replicasPathInScale) { + continue + } + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(h.mappings[h.groupVersion.String()]), + fieldpath.APIVersion(h.groupVersion.String()), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) { + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := internal.EncodeObjectManagedFields(obj, entries); err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + return accessor.GetManagedFields(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go new file mode 100644 index 000000000..d031eefaa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter = internal.TypeConverter + +// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't +// have a schema. It does implement the same interface though (and +// create the same types of objects), so that everything can still work +// the same. CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return internal.NewDeducedTypeConverter() +} + +// NewTypeConverter builds a TypeConverter from a map of OpenAPIV3 schemas. +// This will automatically find the proper version of the object, and the +// corresponding schema information. +// The keys to the map must be consistent with the names +// used by Refs within the schemas. 
+// The schemas should conform to the Kubernetes Structural Schema OpenAPI +// restrictions found in docs: +// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + return internal.NewTypeConverter(openapiSpec, preserveUnknownFields) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS index 3f72c69ba..349bc69d6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -1,7 +1,6 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- pwittrock + - pwittrock reviewers: -- mengqiy -- apelisse + - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index e39627568..25626cf3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/util/dump" "sigs.k8s.io/yaml" ) @@ -76,7 +76,7 @@ func ToYAMLOrError(v interface{}) string { func toYAML(v interface{}) (string, error) { y, err := yaml.Marshal(v) if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, dump.Pretty(v)) } return string(y), nil @@ -88,8 +88,7 @@ func toYAML(v interface{}) (string, error) { // supports JSON merge patch semantics. // // NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 04432b175..8cc1810af 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -17,7 +17,6 @@ limitations under the License. package net import ( - "bufio" "bytes" "context" "crypto/tls" @@ -446,104 +445,6 @@ type Dialer interface { Dial(req *http.Request) (net.Conn, error) } -// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to -// originalLocation). It returns the opened net.Conn and the raw response bytes. -// If requireSameHostRedirects is true, only redirects to the same host are permitted. 
-func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) { - const ( - maxRedirects = 9 // Fail on the 10th redirect - maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers - ) - - var ( - location = originalLocation - method = originalMethod - intermediateConn net.Conn - rawResponse = bytes.NewBuffer(make([]byte, 0, 256)) - body = originalBody - ) - - defer func() { - if intermediateConn != nil { - intermediateConn.Close() - } - }() - -redirectLoop: - for redirects := 0; ; redirects++ { - if redirects > maxRedirects { - return nil, nil, fmt.Errorf("too many redirects (%d)", redirects) - } - - req, err := http.NewRequest(method, location.String(), body) - if err != nil { - return nil, nil, err - } - - req.Header = header - - intermediateConn, err = dialer.Dial(req) - if err != nil { - return nil, nil, err - } - - // Peek at the backend response. - rawResponse.Reset() - respReader := bufio.NewReader(io.TeeReader( - io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes. - rawResponse)) // Save the raw response. - resp, err := http.ReadResponse(respReader, nil) - if err != nil { - // Unable to read the backend response; let the client handle it. - klog.Warningf("Error reading backend response: %v", err) - break redirectLoop - } - - switch resp.StatusCode { - case http.StatusFound: - // Redirect, continue. - default: - // Don't redirect. - break redirectLoop - } - - // Redirected requests switch to "GET" according to the HTTP spec: - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3 - method = "GET" - // don't send a body when following redirects - body = nil - - resp.Body.Close() // not used - - // Prepare to follow the redirect. - redirectStr := resp.Header.Get("Location") - if redirectStr == "" { - return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode) - } - // We have to parse relative to the current location, NOT originalLocation. For example, - // if we request http://foo.com/a and get back "http://bar.com/b", the result should be - // http://bar.com/b. If we then make that request and get back a redirect to "/c", the result - // should be http://bar.com/c, not http://foo.com/c. - location, err = location.Parse(redirectStr) - if err != nil { - return nil, nil, fmt.Errorf("malformed Location header: %v", err) - } - - // Only follow redirects to the same host. Otherwise, propagate the redirect response back. - if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { - return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) - } - - // Reset the connection. - intermediateConn.Close() - intermediateConn = nil - } - - connToReturn := intermediateConn - intermediateConn = nil // Don't close the connection when we return it. - return connToReturn, rawResponse.Bytes(), nil -} - // CloneRequest creates a shallow copy of the request along with a deep copy of the Headers. 
func CloneRequest(req *http.Request) *http.Request { r := new(http.Request) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 822416806..01d028e72 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -339,7 +339,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam for _, addr := range addrs { ip, _, err := netutils.ParseCIDRSloppy(addr.String()) if err != nil { - return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) + return nil, fmt.Errorf("unable to parse CIDR for interface %q: %s", intf.Name, err) } if !memberOf(ip, family) { klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 1c2aba55f..1635e69a5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -20,6 +20,7 @@ import ( "errors" "net" "reflect" + "strings" "syscall" ) @@ -47,6 +48,11 @@ func IsConnectionReset(err error) bool { return false } +// Returns if the given err is "http2: client connection lost" error. +func IsHTTP2ConnectionLost(err error) bool { + return err != nil && strings.Contains(err.Error(), "http2: client connection lost") +} + // Returns if the given err is "connection refused" error func IsConnectionRefused(err error) bool { var errno syscall.Errno diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 035c52811..4fe0c5eb2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "fmt" "net/http" "runtime" @@ -27,14 +28,15 @@ import ( ) var ( - // ReallyCrash controls the behavior of HandleCrash and now defaults - // true. It's still exposed so components can optionally set to false - // to restore prior behavior. + // ReallyCrash controls the behavior of HandleCrash and defaults to + // true. It's exposed so components can optionally set to false + // to restore prior behavior. This flag is mostly used for tests to validate + // crash conditions. ReallyCrash = true ) // PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} +var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via // defer. Additional context-specific handlers can be provided, and will be @@ -42,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic} // handlers and logging the panic message. // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. 
- panic(r) + additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) + for i, handler := range additionalHandlers { + handler := handler // capture loop variable + additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) { + handler(r) + } } + + handleCrash(context.Background(), r, additionalHandlersWithContext...) + } +} + +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// The context is used to determine how to log. +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + handleCrash(ctx, r, additionalHandlers...) + } +} + +// handleCrash is the common implementation of HandleCrash and HandleCrash. +// Having those call a common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + for _, fn := range PanicHandlers { + fn(ctx, r) + } + for _, fn := range additionalHandlers { + fn(ctx, r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) } } // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { +func logPanic(ctx context.Context, r interface{}) { if r == http.ErrAbortHandler { // honor the http.ErrAbortHandler sentinel panic value: // ErrAbortHandler is a sentinel panic value to abort a handler. @@ -72,10 +105,20 @@ func logPanic(r interface{}) { const size = 64 << 10 stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithontext(...)`. + logger := klog.FromContext(ctx).WithCallDepth(4) + + // For backwards compatibility, conversion to string + // is handled here instead of defering to the logging + // backend. if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace)) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace)) } } @@ -83,35 +126,76 @@ func logPanic(r interface{}) { // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. -var ErrorHandlers = []func(error){ +var ErrorHandlers = []ErrorHandler{ logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. 
:) - minPeriod: time.Millisecond, - }).OnError, + func(_ context.Context, _ error, _ string, _ ...interface{}) { + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError() + }, } +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) + // HandlerError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. Invoking this method // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. +// +// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead if err == nil { return } + handleError(context.Background(), err, "Unhandled Error") +} + +// HandlerErrorWithContext is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. The context is used to +// determine how to log the error. +// +// If contextual logging is enabled, the default log output is equivalent to +// +// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...) +// +// Without contextual logging, it is equivalent to: +// +// klog.ErrorS(err, msg, keysAndValues...) +// +// In contrast to HandleError, passing nil for the error is still going to +// trigger a log entry. Don't construct a new error or wrap an error +// with fmt.Errorf. Instead, add additional information via the mssage +// and key/value pairs. +// +// This variant should be used instead of HandleError because it supports +// structured, contextual logging. +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + handleError(ctx, err, msg, keysAndValues...) +} + +// handleError is the common implementation of HandleError and HandleErrorWithContext. +// Using this common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { for _, fn := range ErrorHandlers { - fn(err) + fn(ctx, err, msg, keysAndValues...) } } -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - klog.ErrorDepth(2, err) +// logError prints an error with the call stack of the location it was reported. +// It expects to be called as -> HandleError[WithContext] -> handleError -> logError. +func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + logger := klog.FromContext(ctx).WithCallDepth(3) + logger = klog.LoggerWithName(logger, "UnhandledError") + logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs. 
} type rudimentaryErrorBackoff struct { @@ -124,15 +208,18 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. -func (r *rudimentaryErrorBackoff) OnError(error) { +func (r *rudimentaryErrorBackoff) OnError() { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. @@ -141,7 +228,7 @@ func GetCaller() string { runtime.Callers(3, pc[:]) f := runtime.FuncForPC(pc[0]) if f == nil { - return fmt.Sprintf("Unable to find caller") + return "Unable to find caller" } return f.Name() } diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 9bfa85d43..4d7a17c3a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +// Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[byte]{} +// s2 := New[byte]() type Byte map[byte]Empty // NewByte creates a Byte from a list of values. func NewByte(items ...byte) Byte { - ss := Byte{} - ss.Insert(items...) - return ss + return Byte(New[byte](items...)) } // ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func ByteKeySet(theMap interface{}) Byte { - v := reflect.ValueOf(theMap) - ret := Byte{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(byte)) - } - return ret +func ByteKeySet[T any](theMap map[byte]T) Byte { + return Byte(KeySet(theMap)) } // Insert adds items to the set. func (s Byte) Insert(items ...byte) Byte { - for _, item := range items { - s[item] = Empty{} - } - return s + return Byte(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s Byte) Delete(items ...byte) Byte { - for _, item := range items { - delete(s, item) - } - return s + return Byte(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s Byte) Has(item byte) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. 
func (s Byte) HasAll(items ...byte) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Byte) HasAny(items ...byte) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s Byte) Clone() Byte { + return Byte(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Byte) Difference(s2 Byte) Byte { - result := NewByte() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Byte) Difference(s2 Byte) Byte { + return Byte(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Byte) SymmetricDifference(s2 Byte) Byte { + return Byte(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Byte) Difference(s2 Byte) Byte { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Byte) Union(s2 Byte) Byte { - result := NewByte() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Byte(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Byte) Union(s2 Byte) Byte { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Byte) Intersection(s2 Byte) Byte { - var walk, other Byte - result := NewByte() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Byte(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Byte) IsSuperset(s2 Byte) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 Byte) Equal(s2 Byte) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfByte []byte - -func (s sortableSliceOfByte) Len() int { return len(s) } -func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } -func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted byte slice. func (s Byte) List() []byte { - res := make(sortableSliceOfByte, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []byte(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. 
func (s Byte) UnsortedList() []byte { - res := make([]byte, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Byte) PopAny() (byte, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue byte - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. func (s Byte) Len() int { return len(s) } - -func lessByte(lhs, rhs byte) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go index b152a0bf0..fd281bdb8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - -// Package sets has auto-generated set types. -package sets +// Package sets has generic set and specified sets. Generic set will +// replace specified ones over time. And specific ones are deprecated. +package sets // import "k8s.io/apimachinery/pkg/util/sets" diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go index e11e622c5..fbb1df06d 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets // Empty is public since it is used by some internal API objects for conversions between external diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index 88bd70967..5876fc9de 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +// Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int]{} +// s2 := New[int]() type Int map[int]Empty // NewInt creates a Int from a list of values. func NewInt(items ...int) Int { - ss := Int{} - ss.Insert(items...) - return ss + return Int(New[int](items...)) } // IntKeySet creates a Int from a keys of a map[int](? extends interface{}). // If the value passed in is not actually a map, this will panic. 
-func IntKeySet(theMap interface{}) Int { - v := reflect.ValueOf(theMap) - ret := Int{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int)) - } - return ret +func IntKeySet[T any](theMap map[int]T) Int { + return Int(KeySet(theMap)) } // Insert adds items to the set. func (s Int) Insert(items ...int) Int { - for _, item := range items { - s[item] = Empty{} - } - return s + return Int(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s Int) Delete(items ...int) Int { - for _, item := range items { - delete(s, item) - } - return s + return Int(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s Int) Has(item int) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s Int) HasAll(items ...int) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Int) HasAny(items ...int) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s Int) Clone() Int { + return Int(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Int) Difference(s2 Int) Int { - result := NewInt() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Int) Difference(s2 Int) Int { + return Int(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int) SymmetricDifference(s2 Int) Int { + return Int(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Int) Difference(s2 Int) Int { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Int) Union(s2 Int) Int { - result := NewInt() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Int(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Int) Union(s2 Int) Int { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Int) Intersection(s2 Int) Int { - var walk, other Int - result := NewInt() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Int(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Int) IsSuperset(s2 Int) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. 
// (In practice, this means same elements, order doesn't matter) func (s1 Int) Equal(s2 Int) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfInt []int - -func (s sortableSliceOfInt) Len() int { return len(s) } -func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } -func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted int slice. func (s Int) List() []int { - res := make(sortableSliceOfInt, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s Int) UnsortedList() []int { - res := make([]int, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Int) PopAny() (int, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. func (s Int) Len() int { return len(s) } - -func lessInt(lhs, rhs int) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go index 96a485554..2c640c5d0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +// Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int32]{} +// s2 := New[int32]() type Int32 map[int32]Empty // NewInt32 creates a Int32 from a list of values. func NewInt32(items ...int32) Int32 { - ss := Int32{} - ss.Insert(items...) - return ss + return Int32(New[int32](items...)) } // Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func Int32KeySet(theMap interface{}) Int32 { - v := reflect.ValueOf(theMap) - ret := Int32{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int32)) - } - return ret +func Int32KeySet[T any](theMap map[int32]T) Int32 { + return Int32(KeySet(theMap)) } // Insert adds items to the set. func (s Int32) Insert(items ...int32) Int32 { - for _, item := range items { - s[item] = Empty{} - } - return s + return Int32(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s Int32) Delete(items ...int32) Int32 { - for _, item := range items { - delete(s, item) - } - return s + return Int32(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. 
func (s Int32) Has(item int32) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s Int32) HasAll(items ...int32) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Int32) HasAny(items ...int32) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s Int32) Clone() Int32 { + return Int32(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Int32) Difference(s2 Int32) Int32 { - result := NewInt32() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Int32) Difference(s2 Int32) Int32 { + return Int32(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int32) SymmetricDifference(s2 Int32) Int32 { + return Int32(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Int32) Difference(s2 Int32) Int32 { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Int32) Union(s2 Int32) Int32 { - result := NewInt32() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Int32(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Int32) Union(s2 Int32) Int32 { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Int32) Intersection(s2 Int32) Int32 { - var walk, other Int32 - result := NewInt32() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Int32(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Int32) IsSuperset(s2 Int32) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 Int32) Equal(s2 Int32) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfInt32 []int32 - -func (s sortableSliceOfInt32) Len() int { return len(s) } -func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } -func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted int32 slice. 
func (s Int32) List() []int32 { - res := make(sortableSliceOfInt32, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int32(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s Int32) UnsortedList() []int32 { - res := make([]int32, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Int32) PopAny() (int32, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int32 - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. func (s Int32) Len() int { return len(s) } - -func lessInt32(lhs, rhs int32) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index b375a1b06..bf3eb3ffa 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +// Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int64]{} +// s2 := New[int64]() type Int64 map[int64]Empty // NewInt64 creates a Int64 from a list of values. func NewInt64(items ...int64) Int64 { - ss := Int64{} - ss.Insert(items...) - return ss + return Int64(New[int64](items...)) } // Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func Int64KeySet(theMap interface{}) Int64 { - v := reflect.ValueOf(theMap) - ret := Int64{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int64)) - } - return ret +func Int64KeySet[T any](theMap map[int64]T) Int64 { + return Int64(KeySet(theMap)) } // Insert adds items to the set. func (s Int64) Insert(items ...int64) Int64 { - for _, item := range items { - s[item] = Empty{} - } - return s + return Int64(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s Int64) Delete(items ...int64) Int64 { - for _, item := range items { - delete(s, item) - } - return s + return Int64(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s Int64) Has(item int64) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s Int64) HasAll(items ...int64) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s Int64) HasAny(items ...int64) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) 
} -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s Int64) Clone() Int64 { + return Int64(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s Int64) Difference(s2 Int64) Int64 { - result := NewInt64() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 Int64) Difference(s2 Int64) Int64 { + return Int64(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int64) SymmetricDifference(s2 Int64) Int64 { + return Int64(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. @@ -110,14 +92,7 @@ func (s Int64) Difference(s2 Int64) Int64 { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 Int64) Union(s2 Int64) Int64 { - result := NewInt64() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return Int64(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 Int64) Union(s2 Int64) Int64 { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 Int64) Intersection(s2 Int64) Int64 { - var walk, other Int64 - result := NewInt64() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return Int64(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 Int64) IsSuperset(s2 Int64) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 Int64) Equal(s2 Int64) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfInt64 []int64 - -func (s sortableSliceOfInt64) Len() int { return len(s) } -func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } -func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted int64 slice. func (s Int64) List() []int64 { - res := make(sortableSliceOfInt64, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int64(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s Int64) UnsortedList() []int64 { - res := make([]int64, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s Int64) PopAny() (int64, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int64 - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. 
func (s Int64) Len() int { return len(s) } - -func lessInt64(lhs, rhs int64) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go new file mode 100644 index 000000000..cd961c8c5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -0,0 +1,236 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +import ( + "cmp" + "sort" +) + +// Set is a set of the same type elements, implemented via map[comparable]struct{} for minimal memory consumption. +type Set[T comparable] map[T]Empty + +// cast transforms specified set to generic Set[T]. +func cast[T comparable](s map[T]Empty) Set[T] { return s } + +// New creates a Set from a list of values. +// NOTE: type param must be explicitly instantiated if given items are empty. +func New[T comparable](items ...T) Set[T] { + ss := make(Set[T], len(items)) + ss.Insert(items...) + return ss +} + +// KeySet creates a Set from a keys of a map[comparable](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func KeySet[T comparable, V any](theMap map[T]V) Set[T] { + ret := make(Set[T], len(theMap)) + for keyValue := range theMap { + ret.Insert(keyValue) + } + return ret +} + +// Insert adds items to the set. +func (s Set[T]) Insert(items ...T) Set[T] { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +func Insert[T comparable](set Set[T], items ...T) Set[T] { + return set.Insert(items...) +} + +// Delete removes all items from the set. +func (s Set[T]) Delete(items ...T) Set[T] { + for _, item := range items { + delete(s, item) + } + return s +} + +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). +func (s Set[T]) Clear() Set[T] { + clear(s) + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Set[T]) Has(item T) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Set[T]) HasAll(items ...T) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Set[T]) HasAny(items ...T) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Clone returns a new set which is a copy of the current set. +func (s Set[T]) Clone() Set[T] { + result := make(Set[T], len(s)) + for key := range s { + result.Insert(key) + } + return result +} + +// Difference returns a set of objects that are not in s2. 
+// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 Set[T]) Difference(s2 Set[T]) Set[T] { + result := New[T]() + for key := range s1 { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Set[T]) SymmetricDifference(s2 Set[T]) Set[T] { + return s1.Difference(s2).Union(s2.Difference(s1)) +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Set[T]) Union(s2 Set[T]) Set[T] { + result := s1.Clone() + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Set[T]) Intersection(s2 Set[T]) Set[T] { + var walk, other Set[T] + result := New[T]() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Set[T]) IsSuperset(s2 Set[T]) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Set[T]) Equal(s2 Set[T]) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfGeneric[T cmp.Ordered] []T + +func (g sortableSliceOfGeneric[T]) Len() int { return len(g) } +func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) } +func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +// List returns the contents as a sorted T slice. +// +// This is a separate function and not a method because not all types supported +// by Generic are ordered and only those can be sorted. +func List[T cmp.Ordered](s Set[T]) []T { + res := make(sortableSliceOfGeneric[T], 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return res +} + +// UnsortedList returns the slice with contents in random order. +func (s Set[T]) UnsortedList() []T { + res := make([]T, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// PopAny returns a single element from the set. +func (s Set[T]) PopAny() (T, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue T + return zeroValue, false +} + +// Len returns the size of the set. +func (s Set[T]) Len() int { + return len(s) +} + +func less[T cmp.Ordered](lhs, rhs T) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index e6f37db88..1dab6d13c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,93 +14,75 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. - package sets -import ( - "reflect" - "sort" -) - -// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +// String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[string]{} +// s2 := New[string]() type String map[string]Empty // NewString creates a String from a list of values. func NewString(items ...string) String { - ss := String{} - ss.Insert(items...) - return ss + return String(New[string](items...)) } // StringKeySet creates a String from a keys of a map[string](? extends interface{}). // If the value passed in is not actually a map, this will panic. -func StringKeySet(theMap interface{}) String { - v := reflect.ValueOf(theMap) - ret := String{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(string)) - } - return ret +func StringKeySet[T any](theMap map[string]T) String { + return String(KeySet(theMap)) } // Insert adds items to the set. func (s String) Insert(items ...string) String { - for _, item := range items { - s[item] = Empty{} - } - return s + return String(cast(s).Insert(items...)) } // Delete removes all items from the set. func (s String) Delete(items ...string) String { - for _, item := range items { - delete(s, item) - } - return s + return String(cast(s).Delete(items...)) } // Has returns true if and only if item is contained in the set. func (s String) Has(item string) bool { - _, contained := s[item] - return contained + return cast(s).Has(item) } // HasAll returns true if and only if all items are contained in the set. func (s String) HasAll(items ...string) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true + return cast(s).HasAll(items...) } // HasAny returns true if any items are contained in the set. func (s String) HasAny(items ...string) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false + return cast(s).HasAny(items...) } -// Difference returns a set of objects that are not in s2 +// Clone returns a new set which is a copy of the current set. +func (s String) Clone() String { + return String(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. // For example: // s1 = {a1, a2, a3} // s2 = {a1, a2, a4, a5} // s1.Difference(s2) = {a3} // s2.Difference(s1) = {a4, a5} -func (s String) Difference(s2 String) String { - result := NewString() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result +func (s1 String) Difference(s2 String) String { + return String(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 String) SymmetricDifference(s2 String) String { + return String(cast(s1).SymmetricDifference(cast(s2))) } // Union returns a new set which includes items in either s1 or s2. 
@@ -110,14 +92,7 @@ func (s String) Difference(s2 String) String { // s1.Union(s2) = {a1, a2, a3, a4} // s2.Union(s1) = {a1, a2, a3, a4} func (s1 String) Union(s2 String) String { - result := NewString() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result + return String(cast(s1).Union(cast(s2))) } // Intersection returns a new set which includes the item in BOTH s1 and s2 @@ -126,80 +101,37 @@ func (s1 String) Union(s2 String) String { // s2 = {a2, a3} // s1.Intersection(s2) = {a2} func (s1 String) Intersection(s2 String) String { - var walk, other String - result := NewString() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result + return String(cast(s1).Intersection(cast(s2))) } // IsSuperset returns true if and only if s1 is a superset of s2. func (s1 String) IsSuperset(s2 String) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true + return cast(s1).IsSuperset(cast(s2)) } // Equal returns true if and only if s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) func (s1 String) Equal(s2 String) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) + return cast(s1).Equal(cast(s2)) } -type sortableSliceOfString []string - -func (s sortableSliceOfString) Len() int { return len(s) } -func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } -func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - // List returns the contents as a sorted string slice. func (s String) List() []string { - res := make(sortableSliceOfString, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []string(res) + return List(cast(s)) } // UnsortedList returns the slice with contents in random order. func (s String) UnsortedList() []string { - res := make([]string, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res + return cast(s).UnsortedList() } -// Returns a single element from the set. +// PopAny returns a single element from the set. func (s String) PopAny() (string, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue string - return zeroValue, false + return cast(s).PopAny() } // Len returns the size of the set. 
func (s String) Len() int { return len(s) } - -func lessString(lhs, rhs string) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index cfee199fa..73244449f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,8 +1,9 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- pwittrock -- mengqiy + - apelisse + - pwittrock reviewers: -- mengqiy -- apelisse + - apelisse +emeritus_approvers: + - mengqiy diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index d49a56536..85b0cfc07 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -20,12 +20,17 @@ import ( "errors" "fmt" "reflect" + "strings" "k8s.io/apimachinery/pkg/util/mergepatch" forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" openapi "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" ) +const patchMergeKey = "x-kubernetes-patch-merge-key" +const patchStrategy = "x-kubernetes-patch-strategy" + type PatchMeta struct { patchStrategies []string patchMergeKey string @@ -105,7 +110,7 @@ func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatc // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 // If the underlying element is either an array or a slice, return its element type. 
- case reflect.Ptr: + case reflect.Pointer: t = t.Elem() if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { t = t.Elem() @@ -129,7 +134,7 @@ func getTagStructType(dataStruct interface{}) (reflect.Type, error) { t := reflect.TypeOf(dataStruct) // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } @@ -148,6 +153,90 @@ func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { return t } +type PatchMetaFromOpenAPIV3 struct { + // SchemaList is required to resolve OpenAPI V3 references + SchemaList map[string]*spec.Schema + Schema *spec.Schema +} + +func (s PatchMetaFromOpenAPIV3) traverse(key string) (PatchMetaFromOpenAPIV3, error) { + if s.Schema == nil { + return PatchMetaFromOpenAPIV3{}, nil + } + if len(s.Schema.Properties) == 0 { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + subschema, ok := s.Schema.Properties[key] + if !ok { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + return PatchMetaFromOpenAPIV3{SchemaList: s.SchemaList, Schema: &subschema}, nil +} + +func resolve(l *PatchMetaFromOpenAPIV3) error { + if len(l.Schema.AllOf) > 0 { + l.Schema = &l.Schema.AllOf[0] + } + if refString := l.Schema.Ref.String(); refString != "" { + str := strings.TrimPrefix(refString, "#/components/schemas/") + sch, ok := l.SchemaList[str] + if ok { + l.Schema = sch + } else { + return fmt.Errorf("unable to resolve %s in OpenAPI V3", refString) + } + } + return nil +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + if l.Schema.Items != nil { + l.Schema = l.Schema.Items.Schema + } + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) Name() string { + schema := s.Schema + if len(schema.Type) > 0 { + return strings.Join(schema.Type, "") + } + return "Struct" +} + type PatchMetaFromOpenAPI struct { Schema openapi.Schema } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 1aedaa156..6825a808e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1106,7 +1106,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // Then, sort them by the relative order in setElementOrder, patch list and live list. // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. 
-// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +// Ref: https://git.k8s.io/design-proposals-archive/cli/preserve-order-in-strategic-merge-patch.md func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive @@ -1182,7 +1182,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, schema Looku merged = originalFieldValue case !foundOriginal && foundPatch: // list was added - merged = patchFieldValue + v, keep := removeDirectives(patchFieldValue) + if !keep { + // Shouldn't be possible since patchFieldValue is a slice + continue + } + + merged = v.([]interface{}) case foundOriginal && foundPatch: merged, err = mergeSliceHandler(originalList, patchList, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) @@ -1270,6 +1276,42 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey return patch, serverOnly, nil } +// Removes directives from an object and returns value to use instead and whether +// or not the field/index should even be kept +// May modify input +func removeDirectives(obj interface{}) (interface{}, bool) { + if obj == nil { + return obj, true + } else if typedV, ok := obj.(map[string]interface{}); ok { + if _, hasDirective := typedV[directiveMarker]; hasDirective { + return nil, false + } + + for k, v := range typedV { + var keep bool + typedV[k], keep = removeDirectives(v) + if !keep { + delete(typedV, k) + } + } + return typedV, true + } else if typedV, ok := obj.([]interface{}); ok { + var res []interface{} + if typedV != nil { + // Make sure res is non-nil if patch is non-nil + res = []interface{}{} + } + for _, v := range typedV { + if newV, keep := removeDirectives(v); keep { + res = append(res, newV) + } + } + return res, true + } else { + return obj, true + } +} + // Merge fields from a patch map into the original map. Note: This may modify // both the original map and the patch because getting a deep copy of a map in // golang is highly non-trivial. @@ -1319,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me // original. Otherwise, check if we want to preserve it or skip it. // Preserving the null value is useful when we want to send an explicit // delete to the API server. + // In some cases, this may lead to inconsistent behavior with create. + // ref: https://github.com/kubernetes/kubernetes/issues/123304 + // To avoid breaking compatibility, + // we made corresponding changes on the client side to ensure that the create and patch behaviors are idempotent. if patchV == nil { delete(original, k) if mergeOptions.IgnoreUnmatchedNulls { @@ -1330,7 +1376,13 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me if !ok { if !isDeleteList { // If it's not in the original document, just take the patch value. 
- original[k] = patchV + if mergeOptions.IgnoreUnmatchedNulls { + discardNullValuesFromPatch(patchV) + } + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1339,7 +1391,13 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me patchType := reflect.TypeOf(patchV) if originalType != patchType { if !isDeleteList { - original[k] = patchV + if mergeOptions.IgnoreUnmatchedNulls { + discardNullValuesFromPatch(patchV) + } + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1366,7 +1424,11 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me } original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + // if patchV itself is a directive, then don't keep it + delete(original, k) + } } if err != nil { return nil, err @@ -1375,6 +1437,25 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me return original, nil } +// discardNullValuesFromPatch discards all null property values from patch. +// It traverses all slices and map types. +func discardNullValuesFromPatch(patchV interface{}) { + switch patchV := patchV.(type) { + case map[string]interface{}: + for k, v := range patchV { + if v == nil { + delete(patchV, k) + } else { + discardNullValuesFromPatch(v) + } + } + case []interface{}: + for _, v := range patchV { + discardNullValuesFromPatch(v) + } + } +} + // mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting // fieldPatchStrategy and mergeOptions. func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, @@ -1400,7 +1481,8 @@ func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, return nil, err } - if fieldPatchStrategy == mergeDirective { + // Delete lists are handled the same way regardless of what the field's patch strategy is + if fieldPatchStrategy == mergeDirective || isDeleteList { return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) } else { return typedPatch, nil diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS new file mode 100644 index 000000000..402373247 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 2ed368f56..bc387d011 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -42,19 +42,31 @@ func (v *Error) Error() string { return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) } +type OmitValueType struct{} + +var omitValue = OmitValueType{} + // ErrorBody returns the error message without the field name. This is useful // for building nice-looking higher-level error reporting. 
func (v *Error) ErrorBody() string { var s string - switch v.Type { - case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + switch { + case v.Type == ErrorTypeRequired: + s = v.Type.String() + case v.Type == ErrorTypeForbidden: + s = v.Type.String() + case v.Type == ErrorTypeTooLong: + s = v.Type.String() + case v.Type == ErrorTypeInternal: + s = v.Type.String() + case v.BadValue == omitValue: s = v.Type.String() default: value := v.BadValue valueType := reflect.TypeOf(value) if value == nil || valueType == nil { value = "null" - } else if valueType.Kind() == reflect.Ptr { + } else if valueType.Kind() == reflect.Pointer { if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { value = "null" } else { @@ -123,6 +135,8 @@ const ( // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" + // ErrorTypeTypeInvalid is for the value did not match the schema type for that field + ErrorTypeTypeInvalid ErrorType = "FieldValueTypeInvalid" ) // String converts a ErrorType into its corresponding canonical error message. @@ -146,11 +160,18 @@ func (t ErrorType) String() string { return "Too many" case ErrorTypeInternal: return "Internal error" + case ErrorTypeTypeInvalid: + return "Invalid value" default: panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) } } +// TypeInvalid returns a *Error indicating "type is invalid" +func TypeInvalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeTypeInvalid, field.String(), value, detail} +} + // NotFound returns a *Error indicating "value not found". This is // used to report failure to find a requested value (e.g. looking up an ID). func NotFound(field *Path, value interface{}) *Error { @@ -179,12 +200,12 @@ func Invalid(field *Path, value interface{}, detail string) *Error { // NotSupported returns a *Error indicating "unsupported value". // This is used to report unknown values for enumerated fields (e.g. a list of // valid values). -func NotSupported(field *Path, value interface{}, validValues []string) *Error { +func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *Error { detail := "" if len(validValues) > 0 { quotedValues := make([]string, len(validValues)) for i, v := range validValues { - quotedValues[i] = strconv.Quote(v) + quotedValues[i] = strconv.Quote(fmt.Sprint(v)) } detail = "supported values: " + strings.Join(quotedValues, ", ") } @@ -207,11 +228,40 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error { return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} } +// TooLongMaxLength returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. If maxLength is negative, no max length will be included in the message. +func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error { + var msg string + if maxLength >= 0 { + msg = fmt.Sprintf("may not be longer than %d", maxLength) + } else { + msg = "value is too long" + } + return &Error{ErrorTypeTooLong, field.String(), value, msg} +} + // TooMany returns a *Error indicating "too many". This is used to // report that a given list has too many items. This is similar to TooLong, // but the returned error indicates quantity instead of length. 
func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { - return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} + var msg string + + if maxQuantity >= 0 { + msg = fmt.Sprintf("must have at most %d items", maxQuantity) + } else { + msg = "has too many items" + } + + var actual interface{} + if actualQuantity >= 0 { + actual = actualQuantity + } else { + actual = omitValue + } + + return &Error{ErrorTypeTooMany, field.String(), actual, msg} } // InternalError returns a *Error indicating "internal error". This is used diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 83df4fb8d..b32644902 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -19,10 +19,9 @@ package validation import ( "fmt" "math" - "net" "regexp" - "strconv" "strings" + "unicode" "k8s.io/apimachinery/pkg/util/validation/field" netutils "k8s.io/utils/net" @@ -191,7 +190,13 @@ func IsDNS1123Label(value string) []string { errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) } if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + if dns1123SubdomainRegexp.MatchString(value) { + // It was a valid subdomain and not a valid label. Since we + // already checked length, it must be dots. + errs = append(errs, "must not contain dots") + } else { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } } return errs } @@ -334,7 +339,7 @@ func IsValidPortName(port string) []string { errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") } if !portNameOneLetterRegexp.MatchString(port) { - errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + errs = append(errs, "must contain at least one letter (a-z)") } if strings.Contains(port, "--") { errs = append(errs, "must not contain consecutive hyphens") @@ -346,11 +351,12 @@ func IsValidPortName(port string) []string { } // IsValidIP tests that the argument is a valid IP address. -func IsValidIP(value string) []string { +func IsValidIP(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList if netutils.ParseIPSloppy(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)")) } - return nil + return allErrors } // IsValidIPv4Address tests that the argument is a valid IPv4 address. @@ -373,6 +379,16 @@ func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { return allErrors } +// IsValidCIDR tests that the argument is a valid CIDR value. +func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + _, _, err := netutils.ParseCIDRSloppy(value) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 
10.9.8.0/24 or 2001:db8::/64)")) + } + return allErrors +} + const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" @@ -403,6 +419,9 @@ func IsHTTPHeaderName(value string) []string { const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" +// TODO(hirazawaui): Rename this when the RelaxedEnvironmentVariableValidation gate is removed. +const relaxedEnvVarNameFmtErrMsg string = "a valid environment variable name must consist only of printable ASCII characters other than '='" + var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") // IsEnvVarName tests if a string is a valid environment variable name. @@ -416,6 +435,24 @@ func IsEnvVarName(value string) []string { return errs } +// IsRelaxedEnvVarName tests if a string is a valid environment variable name. +func IsRelaxedEnvVarName(value string) []string { + var errs []string + + if len(value) == 0 { + errs = append(errs, "environment variable name "+EmptyError()) + } + + for _, r := range value { + if r > unicode.MaxASCII || !unicode.IsPrint(r) || r == '=' { + errs = append(errs, relaxedEnvVarNameFmtErrMsg) + break + } + } + + return errs +} + const configMapKeyFmt = `[-._a-zA-Z0-9]+` const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" @@ -487,18 +524,3 @@ func hasChDirPrefix(value string) []string { } return errs } - -// IsValidSocketAddr checks that string represents a valid socket address -// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) -func IsValidSocketAddr(value string) []string { - var errs []string - ip, port, err := net.SplitHostPort(value) - if err != nil { - errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") - return errs - } - portInt, _ := strconv.Atoi(port) - errs = append(errs, IsValidPortNum(portInt)...) - errs = append(errs, IsValidIP(ip)...) - return errs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go new file mode 100644 index 000000000..418761925 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go @@ -0,0 +1,502 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" +) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. 
+ Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. + Cap time.Duration +} + +// Step returns an amount of time to sleep determined by the original +// Duration and Jitter. The backoff is mutated to update its Steps and +// Duration. A nil Backoff always has a zero-duration step. +func (b *Backoff) Step() time.Duration { + if b == nil { + return 0 + } + var nextDuration time.Duration + nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter) + return nextDuration +} + +// DelayFunc returns a function that will compute the next interval to +// wait given the arguments in b. It does not mutate the original backoff +// but the function is safe to use only from a single goroutine. +func (b Backoff) DelayFunc() DelayFunc { + steps := b.Steps + duration := b.Duration + cap := b.Cap + factor := b.Factor + jitter := b.Jitter + + return func() time.Duration { + var nextDuration time.Duration + // jitter is applied per step and is not cumulative over multiple steps + nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter) + return nextDuration + } +} + +// Timer returns a timer implementation appropriate to this backoff's parameters +// for use with wait functions. +func (b Backoff) Timer() Timer { + if b.Steps > 1 || b.Jitter != 0 { + return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()} + } + if b.Duration > 0 { + return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration} + } + return newNoopTimer() +} + +// delay implements the core delay algorithm used in this package. +func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) { + // when steps is non-positive, do not alter the base duration + if steps < 1 { + if jitter > 0 { + return Jitter(duration, jitter), duration, 0 + } + return duration, duration, 0 + } + steps-- + + // calculate the next step's interval + if factor != 0 { + next = time.Duration(float64(duration) * factor) + if cap > 0 && next > cap { + next = cap + steps = 0 + } + } else { + next = duration + } + + // add jitter for this step + if jitter > 0 { + duration = Jitter(duration, jitter) + } + + return duration, next, steps + +} + +// DelayWithReset returns a DelayFunc that will return the appropriate next interval to +// wait. Every resetInterval the backoff parameters are reset to their initial state. +// This method is safe to invoke from multiple goroutines, but all calls will advance +// the backoff state when Factor is set. If Factor is zero, this method is the same as +// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is +// zero no backoff will be performed as the same calling DelayFunc with a zero factor +// and steps. 
+func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc {
+	if b.Factor <= 0 {
+		return b.DelayFunc()
+	}
+	if resetInterval <= 0 {
+		b.Steps = 0
+		b.Factor = 0
+		return b.DelayFunc()
+	}
+	return (&backoffManager{
+		backoff:        b,
+		initialBackoff: b,
+		resetInterval:  resetInterval,
+
+		clock:     c,
+		lastStart: c.Now(),
+		timer:     nil,
+	}).Step
+}
+
+// Until loops until stop channel is closed, running f every period.
+//
+// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
+// with sliding = true (which means the timer for period starts after the f
+// completes).
+func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
+	JitterUntil(f, period, 0.0, true, stopCh)
+}
+
+// UntilWithContext loops until context is done, running f every period.
+//
+// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor and with sliding = true (which means the timer
+// for period starts after the f completes).
+func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+	JitterUntilWithContext(ctx, f, period, 0.0, true)
+}
+
+// NonSlidingUntil loops until stop channel is closed, running f every
+// period.
+//
+// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
+// factor, with sliding = false (meaning the timer for period starts at the same
+// time as the function starts).
+func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
+	JitterUntil(f, period, 0.0, false, stopCh)
+}
+
+// NonSlidingUntilWithContext loops until context is done, running f every
+// period.
+//
+// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor, with sliding = false (meaning the timer for period
+// starts at the same time as the function starts).
+func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+	JitterUntilWithContext(ctx, f, period, 0.0, false)
+}
+
+// JitterUntil loops until stop channel is closed, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Close stopCh to stop. f may not be invoked if stop channel is already
+// closed. Pass NeverStop if you don't want it to stop.
+func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
+	BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
+}
+
+// BackoffUntil loops until stop channel is closed, running f every duration given by BackoffManager.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
+	var t clock.Timer
+	for {
+		select {
+		case <-stopCh:
+			return
+		default:
+		}
+
+		if !sliding {
+			t = backoff.Backoff()
+		}
+
+		func() {
+			defer runtime.HandleCrash()
+			f()
+		}()
+
+		if sliding {
+			t = backoff.Backoff()
+		}
+
+		// NOTE: b/c there is no priority selection in golang
+		// it is possible for this to race, meaning we could
+		// trigger t.C and stopCh, and t.C select falls through.
+ // In order to mitigate we re-check stopCh at the beginning + // of every loop to prevent extra executions of f(). + select { + case <-stopCh: + if !t.Stop() { + <-t.C() + } + return + case <-t.C(): + } + } +} + +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already expired. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) +} + +// backoffManager provides simple backoff behavior in a threadsafe manner to a caller. +type backoffManager struct { + backoff Backoff + initialBackoff Backoff + resetInterval time.Duration + + clock clock.Clock + + lock sync.Mutex + lastStart time.Time + timer clock.Timer +} + +// Step returns the expected next duration to wait. +func (b *backoffManager) Step() time.Duration { + b.lock.Lock() + defer b.lock.Unlock() + + switch { + case b.resetInterval == 0: + b.backoff = b.initialBackoff + case b.clock.Now().Sub(b.lastStart) > b.resetInterval: + b.backoff = b.initialBackoff + b.lastStart = b.clock.Now() + } + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer +// for exponential backoff. The returned timer must be drained before calling Backoff() the second +// time. +func (b *backoffManager) Backoff() clock.Timer { + b.lock.Lock() + defer b.lock.Unlock() + if b.timer == nil { + b.timer = b.clock.NewTimer(b.Step()) + } else { + b.timer.Reset(b.Step()) + } + return b.timer +} + +// Timer returns a new Timer instance that shares the clock and the reset behavior with all other +// timers. +func (b *backoffManager) Timer() Timer { + return DelayFunc(b.Step).Timer(b.clock) +} + +// BackoffManager manages backoff with a particular scheme based on its underlying implementation. +type BackoffManager interface { + // Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not + // safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff + // until Timer.C() drains. If the second Backoff() is called before the timer from the first + // Backoff() call finishes, the first timer will NOT be drained and result in undetermined + // behavior. + Backoff() clock.Timer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. +type exponentialBackoffManagerImpl struct { + backoff *Backoff + backoffTimer clock.Timer + lastBackoffStart time.Time + initialBackoff time.Duration + backoffResetDuration time.Duration + clock clock.Clock +} + +// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and +// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. +// This backoff manager is used to reduce load during upstream unhealthiness. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. 
Callers should construct a
+// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then
+// invoke Timer() when calling wait.BackoffUntil.
+//
+// Instead of:
+//
+//	bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock)
+//	...
+//	wait.BackoffUntil(..., bm.Backoff, ...)
+//
+// Use:
+//
+//	delayFn := wait.Backoff{
+//		Duration: init,
+//		Cap:      max,
+//		Steps:    int(math.Ceil(float64(max) / float64(init))), // now a required argument
+//		Factor:   factor,
+//		Jitter:   jitter,
+//	}.DelayWithReset(clock, reset)
+//	wait.BackoffUntil(..., delayFn.Timer(), ...)
+func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
+	return &exponentialBackoffManagerImpl{
+		backoff: &Backoff{
+			Duration: initBackoff,
+			Factor:   backoffFactor,
+			Jitter:   jitter,
+
+			// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
+			// what we ideally need here, we set it to max int and assume we will never use up the steps
+			Steps: math.MaxInt32,
+			Cap:   maxBackoff,
+		},
+		backoffTimer:         nil,
+		initialBackoff:       initBackoff,
+		lastBackoffStart:     c.Now(),
+		backoffResetDuration: resetDuration,
+		clock:                c,
+	}
+}
+
+func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
+	if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
+		b.backoff.Steps = math.MaxInt32
+		b.backoff.Duration = b.initialBackoff
+	}
+	b.lastBackoffStart = b.clock.Now()
+	return b.backoff.Step()
+}
+
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
+// The returned timer must be drained before calling Backoff() the second time
+func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
+	if b.backoffTimer == nil {
+		b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
+	} else {
+		b.backoffTimer.Reset(b.getNextBackoff())
+	}
+	return b.backoffTimer
+}
+
+// Deprecated: Will be removed when the legacy polling functions are removed.
+type jitteredBackoffManagerImpl struct {
+	clock        clock.Clock
+	duration     time.Duration
+	jitter       float64
+	backoffTimer clock.Timer
+}
+
+// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter
+// is negative, backoff will not be jittered.
+//
+// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a
+// Backoff struct and invoke Timer() when calling wait.BackoffUntil.
+//
+// Instead of:
+//
+//	bm := wait.NewJitteredBackoffManager(duration, jitter, clock)
+//	...
+//	wait.BackoffUntil(..., bm.Backoff, ...)
+//
+// Use:
+//
+//	wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...)
+func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
+	return &jitteredBackoffManagerImpl{
+		clock:        c,
+		duration:     duration,
+		jitter:       jitter,
+		backoffTimer: nil,
+	}
+}
+
+func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
+	jitteredPeriod := j.duration
+	if j.jitter > 0.0 {
+		jitteredPeriod = Jitter(j.duration, j.jitter)
+	}
+	return jitteredPeriod
+}
+
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
+// The returned timer must be drained before calling Backoff() the second time +func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } + return j.backoffTimer +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. +// +// Since backoffs are often subject to cancellation, we recommend using +// ExponentialBackoffWithContext and passing a context to the method. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) + } + return ErrWaitTimeout +} + +// ExponentialBackoffWithContext repeats a condition check with exponential backoff. +// It immediately returns an error if the condition returns an error, the context is cancelled +// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout). +// If an error is returned by the condition the backoff stops immediately. The condition will +// never be invoked more than backoff.Steps times. +func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go new file mode 100644 index 000000000..1d3dcaa74 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// DelayFunc returns the next time interval to wait. +type DelayFunc func() time.Duration + +// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes. +// Use Backoff{...}.Timer() for simple delays and more efficient timers. 
+func (fn DelayFunc) Timer(c clock.Clock) Timer { + return &variableTimer{fn: fn, new: c.NewTimer} +} + +// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This +// offers all of the functionality of the methods in this package. +func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition) +} + +// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that +// wish to share a single delay timer. +func (fn DelayFunc) Concurrent() DelayFunc { + var lock sync.Mutex + return func() time.Duration { + lock.Lock() + defer lock.Unlock() + return fn() + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/error.go b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go new file mode 100644 index 000000000..dd75801d8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go @@ -0,0 +1,96 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "errors" +) + +// ErrWaitTimeout is returned when the condition was not satisfied in time. +// +// Deprecated: This type will be made private in favor of Interrupted() +// for checking errors or ErrorInterrupted(err) for returning a wrapped error. +var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition")) + +// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or +// Until loop exited for any reason besides the condition returning true or an +// error. A loop is considered interrupted if the calling context is cancelled, +// the context reaches its deadline, or a backoff reaches its maximum allowed +// steps. +// +// Callers should use this method instead of comparing the error value directly to +// ErrWaitTimeout, as methods that cancel a context may not return that error. +// +// Instead of: +// +// err := wait.Poll(...) +// if err == wait.ErrWaitTimeout { +// log.Infof("Wait for operation exceeded") +// } else ... +// +// Use: +// +// err := wait.Poll(...) +// if wait.Interrupted(err) { +// log.Infof("Wait for operation exceeded") +// } else ... +func Interrupted(err error) bool { + switch { + case errors.Is(err, errWaitTimeout), + errors.Is(err, context.Canceled), + errors.Is(err, context.DeadlineExceeded): + return true + default: + return false + } +} + +// errInterrupted +type errInterrupted struct { + cause error +} + +// ErrorInterrupted returns an error that indicates the wait was ended +// early for a given reason. If no cause is provided a generic error +// will be used but callers are encouraged to provide a real cause for +// clarity in debugging. 
+func ErrorInterrupted(cause error) error { + switch cause.(type) { + case errInterrupted: + // no need to wrap twice since errInterrupted is only needed + // once in a chain + return cause + default: + return errInterrupted{cause} + } +} + +// errWaitTimeout is the private version of the previous ErrWaitTimeout +// and is private to prevent direct comparison. Use ErrorInterrupted(err) +// to get an error that will return true for Interrupted(err). +var errWaitTimeout = errInterrupted{} + +func (e errInterrupted) Unwrap() error { return e.cause } +func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout } +func (e errInterrupted) Error() string { + if e.cause == nil { + // returns the same error message as historical behavior + return "timed out waiting for the condition" + } + return e.cause.Error() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go new file mode 100644 index 000000000..107bfc132 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -0,0 +1,95 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// loopConditionUntilContext executes the provided condition at intervals defined by +// the provided timer until the provided context is cancelled, the condition returns +// true, or the condition returns an error. If sliding is true, the period is computed +// after condition runs. If it is false then period includes the runtime for condition. +// If immediate is false the first delay happens before any call to condition, if +// immediate is true the condition will be invoked before waiting and guarantees that +// the condition is invoked at least once, regardless of whether the context has been +// cancelled. The returned error is the error returned by the last condition or the +// context error if the context was terminated. +// +// This is the common loop construct for all polling in the wait package. +func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error { + defer t.Stop() + + var timeCh <-chan time.Time + doneCh := ctx.Done() + + if !sliding { + timeCh = t.C() + } + + // if immediate is true the condition is + // guaranteed to be executed at least once, + // if we haven't requested immediate execution, delay once + if immediate { + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + } + + if sliding { + timeCh = t.C() + } + + for { + + // Wait for either the context to be cancelled or the next invocation be called + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + + // IMPORTANT: Because there is no channel priority selection in golang + // it is possible for very short timers to "win" the race in the previous select + // repeatedly even when the context has been canceled. 
We therefore must + // explicitly check for context cancellation on every loop and exit if true to + // guarantee that we don't invoke condition more than once after context has + // been cancelled. + if err := ctx.Err(); err != nil { + return err + } + + if !sliding { + t.Next() + } + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + if sliding { + t.Next() + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go new file mode 100644 index 000000000..231d4c384 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go @@ -0,0 +1,315 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" +) + +// PollUntilContextCancel tries a condition func until it returns true, an error, or the context +// is cancelled or hits a deadline. condition will be invoked after the first interval if the +// context is not cancelled first. The returned error will be from ctx.Err(), the condition's +// err return value, or nil. If invoking condition takes longer than interval the next condition +// will be invoked immediately. When using very short intervals, condition may be invoked multiple +// times before a context cancellation is detected. If immediate is true, condition will be +// invoked before waiting and guarantees that condition is invoked at least once, regardless of +// whether the context has been cancelled. +func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// PollUntilContextTimeout will terminate polling after timeout duration by setting a context +// timeout. This is provided as a convenience function for callers not currently executing under +// a deadline and is equivalent to: +// +// deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) +// err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition) +// +// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying +// inline usage. All other behavior is identical to PollUntilContextCancel. +func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error { + deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) + defer deadlineCancel() + return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// Poll tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. 
+// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func Poll(interval, timeout time.Duration, condition ConditionFunc) error { + return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollWithContext tries a condition func until it returns true, an error, +// or when the context expires or the timeout is reached, whichever +// happens first. +// +// PollWithContext always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, timeout), condition) +} + +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PollUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollUntilWithContext always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollInfinite tries a condition func until it returns true or an error +// +// PollInfinite always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. 
+func PollInfinite(interval time.Duration, condition ConditionFunc) error { + return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollInfiniteWithContext tries a condition func until it returns true or an error +// +// PollInfiniteWithContext always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollImmediateWithContext tries a condition func until it returns true, an error, +// or the timeout is reached or the specified context expires, whichever happens first. +// +// PollImmediateWithContext always checks 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, timeout), condition) +} + +// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. +// +// PollImmediateUntil runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. 
+func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollImmediateUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollImmediateInfiniteWithContext tries a condition func until it returns true +// or an error or the specified context gets cancelled or expired. +// +// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// Internally used, each of the public 'Poll*' function defined in this +// package should invoke this internal function with appropriate parameters. +// ctx: the context specified by the caller, for infinite polling pass +// a context that never gets cancelled or expired. +// immediate: if true, the 'condition' will be invoked before waiting for the interval, +// in this case 'condition' will always be invoked at least once. +// wait: user specified WaitFunc function that controls at what interval the condition +// function should be invoked periodically and whether it is bound by a timeout. +// condition: user specified ConditionWithContextFunc function. +// +// Deprecated: will be removed in favor of loopConditionUntilContext. 
+func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error { + if immediate { + done, err := runConditionWithCrashProtectionWithContext(ctx, condition) + if err != nil { + return err + } + if done { + return nil + } + } + + select { + case <-ctx.Done(): + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead + return ErrWaitTimeout + default: + return waitForWithContext(ctx, wait, condition) + } +} + +// poller returns a WaitFunc that will send to the channel every interval until +// timeout has elapsed and then closes the channel. +// +// Over very short intervals you may receive no ticks before the channel is +// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. +// +// Output ticks are not buffered. If the channel is not ready to receive an +// item, the tick is skipped. +// +// Deprecated: Will be removed in a future release. +func poller(interval, timeout time.Duration) waitWithContextFunc { + return waitWithContextFunc(func(ctx context.Context) <-chan struct{} { + ch := make(chan struct{}) + + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + // time.After is more convenient, but it + // potentially leaves timers around much longer + // than necessary if we exit early. + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + + for { + select { + case <-tick.C: + // If the consumer isn't ready for this signal drop it and + // check the other channels. + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-ctx.Done(): + return + } + } + }() + + return ch + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go new file mode 100644 index 000000000..3efba3213 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go @@ -0,0 +1,121 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "time" + + "k8s.io/utils/clock" +) + +// Timer abstracts how wait functions interact with time runtime efficiently. Test +// code may implement this interface directly but package consumers are encouraged +// to use the Backoff type as the primary mechanism for acquiring a Timer. The +// interface is a simplification of clock.Timer to prevent misuse. Timers are not +// expected to be safe for calls from multiple goroutines. +type Timer interface { + // C returns a channel that will receive a struct{} each time the timer fires. + // The channel should not be waited on after Stop() is invoked. It is allowed + // to cache the returned value of C() for the lifetime of the Timer. 
+ C() <-chan time.Time + // Next is invoked by wait functions to signal timers that the next interval + // should begin. You may only use Next() if you have drained the channel C(). + // You should not call Next() after Stop() is invoked. + Next() + // Stop releases the timer. It is safe to invoke if no other methods have been + // called. + Stop() +} + +type noopTimer struct { + closedCh <-chan time.Time +} + +// newNoopTimer creates a timer with a unique channel to avoid contention +// for the channel's lock across multiple unrelated timers. +func newNoopTimer() noopTimer { + ch := make(chan time.Time) + close(ch) + return noopTimer{closedCh: ch} +} + +func (t noopTimer) C() <-chan time.Time { + return t.closedCh +} +func (noopTimer) Next() {} +func (noopTimer) Stop() {} + +type variableTimer struct { + fn DelayFunc + t clock.Timer + new func(time.Duration) clock.Timer +} + +func (t *variableTimer) C() <-chan time.Time { + if t.t == nil { + d := t.fn() + t.t = t.new(d) + } + return t.t.C() +} +func (t *variableTimer) Next() { + if t.t == nil { + return + } + d := t.fn() + t.t.Reset(d) +} +func (t *variableTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +type fixedTimer struct { + interval time.Duration + t clock.Ticker + new func(time.Duration) clock.Ticker +} + +func (t *fixedTimer) C() <-chan time.Time { + if t.t == nil { + t.t = t.new(t.interval) + } + return t.t.C() +} +func (t *fixedTimer) Next() { + // no-op for fixed timers +} +func (t *fixedTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +var ( + // RealTimer can be passed to methods that need a clock.Timer. + RealTimer = clock.RealClock{}.NewTimer +) + +var ( + // internalClock is used for test injection of clocks + internalClock = clock.RealClock{} +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 9268064af..6805e8cf9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -18,14 +18,11 @@ package wait import ( "context" - "errors" - "math" "math/rand" "sync" "time" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/utils/clock" ) // For any test of the style: @@ -83,113 +80,6 @@ func Forever(f func(), period time.Duration) { Until(f, period, NeverStop) } -// Until loops until stop channel is closed, running f every period. -// -// Until is syntactic sugar on top of JitterUntil with zero jitter factor and -// with sliding = true (which means the timer for period starts after the f -// completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// UntilWithContext loops until context is done, running f every period. -// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). -func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. -// -// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = false (meaning the timer for period starts at the same -// time as the function starts). 
-func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// NonSlidingUntilWithContext loops until context is done, running f every -// period. -// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - -// JitterUntil loops until stop channel is closed, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Close stopCh to stop. f may not be invoked if stop channel is already -// closed. Pass NeverStop to if you don't want it stop. -func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) -} - -// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { - var t clock.Timer - for { - select { - case <-stopCh: - return - default: - } - - if !sliding { - t = backoff.Backoff() - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = backoff.Backoff() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and stopCh, and t.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - if !t.Stop() { - <-t.C() - } - return - case <-t.C(): - } - } -} - -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -203,9 +93,6 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration { return wait } -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - // ConditionFunc returns true if the condition is satisfied, or an error // if the loop should be aborted. 
type ConditionFunc func() (done bool, err error) @@ -223,425 +110,80 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc { } } -// runConditionWithCrashProtection runs a ConditionFunc with crash protection -func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { - return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) -} - -// runConditionWithCrashProtectionWithContext runs a -// ConditionWithContextFunc with crash protection. -func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { - defer runtime.HandleCrash() - return condition(ctx) -} - -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration, if factor is not zero - // and the limits imposed by Steps and Cap have not been reached. - // Should not be negative. - // The jitter does not contribute to the updates to the duration parameter. - Factor float64 - // The sleep at each iteration is the duration plus an additional - // amount chosen uniformly at random from the interval between - // zero and `jitter*duration`. - Jitter float64 - // The remaining number of iterations in which the duration - // parameter may change (but progress can be stopped earlier by - // hitting the cap). If not positive, the duration is not - // changed. Used for exponential backoff in combination with - // Factor and Cap. - Steps int - // A limit on revised values of the duration parameter. If a - // multiplication by the factor parameter would make the duration - // exceed the cap then the duration is set to the cap and the - // steps parameter is set to zero. - Cap time.Duration -} - -// Step (1) returns an amount of time to sleep determined by the -// original Duration and Jitter and (2) mutates the provided Backoff -// to update its Steps and Duration. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// contextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - -// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in -// undetermined behavior. -// The BackoffManager is supposed to be called in a single-threaded environment. 
-type BackoffManager interface { - Backoff() clock.Timer -} - -type exponentialBackoffManagerImpl struct { - backoff *Backoff - backoffTimer clock.Timer - lastBackoffStart time.Time - initialBackoff time.Duration - backoffResetDuration time.Duration - clock clock.Clock -} - -// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and -// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. -// This backoff manager is used to reduce load during upstream unhealthiness. -func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { - return &exponentialBackoffManagerImpl{ - backoff: &Backoff{ - Duration: initBackoff, - Factor: backoffFactor, - Jitter: jitter, - - // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not - // what we ideally need here, we set it to max int and assume we will never use up the steps - Steps: math.MaxInt32, - Cap: maxBackoff, - }, - backoffTimer: nil, - initialBackoff: initBackoff, - lastBackoffStart: c.Now(), - backoffResetDuration: resetDuration, - clock: c, - } -} - -func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { - if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { - b.backoff.Steps = math.MaxInt32 - b.backoff.Duration = b.initialBackoff - } - b.lastBackoffStart = b.clock.Now() - return b.backoff.Step() -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. -// The returned timer must be drained before calling Backoff() the second time -func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - if b.backoffTimer == nil { - b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) - } else { - b.backoffTimer.Reset(b.getNextBackoff()) - } - return b.backoffTimer -} - -type jitteredBackoffManagerImpl struct { - clock clock.Clock - duration time.Duration - jitter float64 - backoffTimer clock.Timer -} - -// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter -// is negative, backoff will not be jittered. -func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { - return &jitteredBackoffManagerImpl{ - clock: c, - duration: duration, - jitter: jitter, - backoffTimer: nil, - } -} - -func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { - jitteredPeriod := j.duration - if j.jitter > 0.0 { - jitteredPeriod = Jitter(j.duration, j.jitter) - } - return jitteredPeriod +// ContextForChannel provides a context that will be treated as cancelled +// when the provided parentCh is closed. The implementation returns +// context.Canceled for Err() if and only if the parentCh is closed. +func ContextForChannel(parentCh <-chan struct{}) context.Context { + return channelContext{stopCh: parentCh} } -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. 
-// The returned timer must be drained before calling Backoff() the second time -func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - backoff := j.getNextBackoff() - if j.backoffTimer == nil { - j.backoffTimer = j.clock.NewTimer(backoff) - } else { - j.backoffTimer.Reset(backoff) - } - return j.backoffTimer -} +var _ context.Context = channelContext{} -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It repeatedly checks the condition and then sleeps, using `backoff.Step()` -// to determine the length of the sleep and adjust Duration and Steps. -// Stops and returns as soon as: -// 1. the condition check returns true or an error, -// 2. `backoff.Steps` checks of the condition have been done, or -// 3. a sleep truncated by the cap on duration has been completed. -// In case (1) the returned error is what the condition function returned. -// In all other cases, ErrWaitTimeout is returned. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// Poll always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollWithContext tries a condition func until it returns true, an error, -// or when the context expires or the timeout is reached, whichever -// happens first. -// -// PollWithContext always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, timeout), condition) -} - -// PollUntil tries a condition func until it returns true, an error or stopCh is +// channelContext will behave as if the context were cancelled when stopCh is // closed. -// -// PollUntil always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return PollUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollUntilWithContext always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. 
-func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) -} - -// PollInfinite tries a condition func until it returns true or an error -// -// PollInfinite always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) -} - -// PollInfiniteWithContext tries a condition func until it returns true or an error -// -// PollInfiniteWithContext always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) +type channelContext struct { + stopCh <-chan struct{} } -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollImmediateWithContext tries a condition func until it returns true, an error, -// or the timeout is reached or the specified context expires, whichever happens first. -// -// PollImmediateWithContext always checks 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, timeout), condition) -} - -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollImmediateUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. 
-func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) +func (c channelContext) Done() <-chan struct{} { return c.stopCh } +func (c channelContext) Err() error { + select { + case <-c.stopCh: + return context.Canceled + default: + return nil + } } +func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false } +func (c channelContext) Value(key any) any { return nil } -// PollImmediateInfinite tries a condition func until it returns true or an error +// runConditionWithCrashProtection runs a ConditionFunc with crash protection. // -// PollImmediateInfinite runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + defer runtime.HandleCrash() + return condition() } -// PollImmediateInfiniteWithContext tries a condition func until it returns true -// or an error or the specified context gets cancelled or expired. +// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc +// with crash protection. // -// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) -} - -// Internally used, each of the the public 'Poll*' function defined in this -// package should invoke this internal function with appropriate parameters. -// ctx: the context specified by the caller, for infinite polling pass -// a context that never gets cancelled or expired. -// immediate: if true, the 'condition' will be invoked before waiting for the interval, -// in this case 'condition' will always be invoked at least once. -// wait: user specified WaitFunc function that controls at what interval the condition -// function should be invoked periodically and whether it is bound by a timeout. -// condition: user specified ConditionWithContextFunc function. -func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error { - if immediate { - done, err := runConditionWithCrashProtectionWithContext(ctx, condition) - if err != nil { - return err - } - if done { - return nil - } - } - - select { - case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility - return ErrWaitTimeout - default: - return WaitForWithContext(ctx, wait, condition) - } +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) } -// WaitFunc creates a channel that receives an item every time a test +// waitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. 
-type WaitFunc func(done <-chan struct{}) <-chan struct{} +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitFunc func(done <-chan struct{}) <-chan struct{} // WithContext converts the WaitFunc to an equivalent WaitWithContextFunc -func (w WaitFunc) WithContext() WaitWithContextFunc { +func (w waitFunc) WithContext() waitWithContextFunc { return func(ctx context.Context) <-chan struct{} { return w(ctx.Done()) } } -// WaitWithContextFunc creates a channel that receives an item every time a test +// waitWithContextFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. // // When the specified context gets cancelled or expires the function // stops sending item and returns immediately. -type WaitWithContextFunc func(ctx context.Context) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. // -// WaitFor gets a channel from 'wait()”, and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. -// -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. -// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - ctx, cancel := contextForChannel(done) - defer cancel() - return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext()) -} +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitWithContextFunc func(ctx context.Context) <-chan struct{} -// WaitForWithContext continually checks 'fn' as driven by 'wait'. +// waitForWithContext continually checks 'fn' as driven by 'wait'. // -// WaitForWithContext gets a channel from 'wait()”, and then invokes 'fn' +// waitForWithContext gets a channel from 'wait()”, and then invokes 'fn' // once for every value placed on the channel and once more when the // channel is closed. If the channel is closed and 'fn' -// returns false without error, WaitForWithContext returns ErrWaitTimeout. +// returns false without error, waitForWithContext returns ErrWaitTimeout. // // If 'fn' returns an error the loop ends and that error is returned. If // 'fn' returns true the loop ends and nil is returned. @@ -651,8 +193,11 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // // When the ctx.Done() channel is closed, because the golang `select` statement is // "uniform pseudo-random", the `fn` might still run one or multiple times, -// though eventually `WaitForWithContext` will return. -func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error { +// though eventually `waitForWithContext` will return. +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. 
+func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error { waitCtx, cancel := context.WithCancel(context.Background()) defer cancel() c := wait(waitCtx) @@ -670,88 +215,9 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit return ErrWaitTimeout } case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead return ErrWaitTimeout } } } - -// poller returns a WaitFunc that will send to the channel every interval until -// timeout has elapsed and then closes the channel. -// -// Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. -// -// Output ticks are not buffered. If the channel is not ready to receive an -// item, the tick is skipped. -func poller(interval, timeout time.Duration) WaitWithContextFunc { - return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-ctx.Done(): - return - } - } - }() - - return ch - }) -} - -// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never -// exceeds the deadline specified by the request context. -func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - - if backoff.Steps == 1 { - break - } - - waitBeforeRetry := backoff.Step() - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(waitBeforeRetry): - } - } - - return ErrWaitTimeout -} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index e01d51906..d51f9567e 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -17,6 +17,7 @@ limitations under the License. package watch import ( + "fmt" "sync" "k8s.io/apimachinery/pkg/runtime" @@ -44,8 +45,11 @@ type Broadcaster struct { nextWatcher int64 distributing sync.WaitGroup - incoming chan Event - stopped chan struct{} + // incomingBlock allows us to ensure we don't race and end up sending events + // to a closed channel following a broadcaster shutdown. + incomingBlock sync.Mutex + incoming chan Event + stopped chan struct{} // How large to make watcher's channel. watchQueueLength int @@ -111,6 +115,8 @@ func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { // won't ever see that event, and will always see any event after they are // added. 
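Earlier in this wait.go hunk, the contextForChannel plumbing and the exported WaitFunc helpers give way to a small channelContext type that adapts a bare stop channel to context.Context without spawning a goroutine. A minimal standalone sketch of the same idea follows; it is not the vendored code itself, and the name stopChannelContext is invented here.

package main

import (
	"context"
	"fmt"
	"time"
)

// stopChannelContext treats a close of stopCh as cancellation. It carries no
// deadline and no values, mirroring the channelContext added above.
type stopChannelContext struct {
	stopCh <-chan struct{}
}

func (c stopChannelContext) Done() <-chan struct{}       { return c.stopCh }
func (c stopChannelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (c stopChannelContext) Value(key any) any           { return nil }

func (c stopChannelContext) Err() error {
	select {
	case <-c.stopCh:
		return context.Canceled
	default:
		return nil
	}
}

var _ context.Context = stopChannelContext{}

func main() {
	stop := make(chan struct{})
	ctx := stopChannelContext{stopCh: stop}

	go func() {
		time.Sleep(50 * time.Millisecond)
		close(stop) // closing the channel behaves like cancelling the context
	}()

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context canceled
}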
func (m *Broadcaster) blockQueue(f func()) { + m.incomingBlock.Lock() + defer m.incomingBlock.Unlock() select { case <-m.stopped: return @@ -132,7 +138,7 @@ func (m *Broadcaster) blockQueue(f func()) { // Note: new watchers will only receive new events. They won't get an entire history // of previous events. It will block until the watcher is actually added to the // broadcaster. -func (m *Broadcaster) Watch() Interface { +func (m *Broadcaster) Watch() (Interface, error) { var w *broadcasterWatcher m.blockQueue(func() { id := m.nextWatcher @@ -146,11 +152,9 @@ func (m *Broadcaster) Watch() Interface { m.watchers[id] = w }) if w == nil { - // The panic here is to be consistent with the previous interface behavior - // we are willing to re-evaluate in the future. - panic("broadcaster already stopped") + return nil, fmt.Errorf("broadcaster already stopped") } - return w + return w, nil } // WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends @@ -158,7 +162,7 @@ func (m *Broadcaster) Watch() Interface { // The returned watch will have a queue length that is at least large enough to accommodate // all of the items in queuedEvents. It will block until the watcher is actually added to // the broadcaster. -func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { +func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) (Interface, error) { var w *broadcasterWatcher m.blockQueue(func() { id := m.nextWatcher @@ -179,11 +183,9 @@ func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { } }) if w == nil { - // The panic here is to be consistent with the previous interface behavior - // we are willing to re-evaluate in the future. - panic("broadcaster already stopped") + return nil, fmt.Errorf("broadcaster already stopped") } - return w + return w, nil } // stopWatching stops the given watcher and removes it from the list. @@ -210,19 +212,38 @@ func (m *Broadcaster) closeAll() { } // Action distributes the given event among all watchers. -func (m *Broadcaster) Action(action EventType, obj runtime.Object) { +func (m *Broadcaster) Action(action EventType, obj runtime.Object) error { + m.incomingBlock.Lock() + defer m.incomingBlock.Unlock() + select { + case <-m.stopped: + return fmt.Errorf("broadcaster already stopped") + default: + } + m.incoming <- Event{action, obj} + return nil } // Action distributes the given event among all watchers, or drops it on the floor // if too many incoming actions are queued up. Returns true if the action was sent, // false if dropped. -func (m *Broadcaster) ActionOrDrop(action EventType, obj runtime.Object) bool { +func (m *Broadcaster) ActionOrDrop(action EventType, obj runtime.Object) (bool, error) { + m.incomingBlock.Lock() + defer m.incomingBlock.Unlock() + + // Ensure that if the broadcaster is stopped we do not send events to it. + select { + case <-m.stopped: + return false, fmt.Errorf("broadcaster already stopped") + default: + } + select { case m.incoming <- Event{action, obj}: - return true + return true, nil default: - return false + return false, nil } } diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index fd0550e4a..ce37fd8c1 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -27,13 +27,25 @@ import ( // Interface can be implemented by anything that knows how to watch and report changes. type Interface interface { - // Stops watching. 
Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. + // Stop tells the producer that the consumer is done watching, so the + // producer should stop sending events and close the result channel. The + // consumer should keep watching for events until the result channel is + // closed. + // + // Because some implementations may create channels when constructed, Stop + // must always be called, even if the consumer has not yet called + // ResultChan(). + // + // Only the consumer should call Stop(), not the producer. If the producer + // errors and needs to stop the watch prematurely, it should instead send + // an error event and close the result channel. Stop() - // Returns a chan which will receive all the events. If an error occurs - // or Stop() is called, the implementation will close this channel and - // release any resources used by the watch. + // ResultChan returns a channel which will receive events from the event + // producer. If an error occurs or Stop() is called, the producer must + // close this channel and release any resources used by the watch. + // Closing the result channel tells the consumer that no more events will be + // sent. ResultChan() <-chan Event } @@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event { func (pw *ProxyWatcher) StopChan() <-chan struct{} { return pw.stopCh } + +// MockWatcher implements watch.Interface with mockable functions. +type MockWatcher struct { + StopFunc func() + ResultChanFunc func() <-chan Event +} + +var _ Interface = &MockWatcher{} + +// Stop calls StopFunc +func (mw MockWatcher) Stop() { + mw.StopFunc() +} + +// ResultChan calls ResultChanFunc +func (mw MockWatcher) ResultChan() <-chan Event { + return mw.ResultChanFunc() +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS index 3f72c69ba..349bc69d6 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -1,7 +1,6 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- pwittrock + - pwittrock reviewers: -- mengqiy -- apelisse + - apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go index 8205a4dd1..5b8514b3f 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -28,7 +28,7 @@ const ( // TODO: fix the returned errors to be introspectable. func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } @@ -183,7 +183,7 @@ func typeFields(t reflect.Type) []field { index[len(f.index)] = i ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { + if ft.Name() == "" && ft.Kind() == reflect.Pointer { // Follow pointer. 
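The watch.go change above tightens the Interface contract (Stop must always be called, and only by the consumer) and adds a MockWatcher whose Stop and ResultChan behaviour is supplied as functions. A rough usage sketch, assuming the vendored k8s.io/apimachinery/pkg/watch package; the test scenario and the single event are invented.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	// Pre-load one event and close the channel so the consumer loop terminates.
	events := make(chan watch.Event, 1)
	events <- watch.Event{Type: watch.Added}
	close(events)

	w := watch.MockWatcher{
		StopFunc:       func() { /* nothing to release in this sketch */ },
		ResultChanFunc: func() <-chan watch.Event { return events },
	}
	defer w.Stop() // Stop must always be called, per the Interface contract above

	for ev := range w.ResultChan() {
		fmt.Println("received:", ev.Type)
	}
}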
ft = ft.Elem() } diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go index 6be80349a..511e625b6 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -100,7 +100,8 @@ func makeUsefulPanic(v reflect.Value) { // Tests for deep equality using reflected types. The map argument tracks // comparisons that have already been seen, which allows short circuiting on // recursive types. -func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { +// equateNilAndEmpty controls whether empty maps/slices are equivalent to nil +func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, equateNilAndEmpty bool, depth int) bool { defer makeUsefulPanic(v1) if !v1.IsValid() || !v2.IsValid() { @@ -150,17 +151,36 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, // We don't need to check length here because length is part of // an array's type, which has already been filtered for. for i := 0; i < v1.Len(); i++ { - if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, equateNilAndEmpty, depth+1) { return false } } return true case reflect.Slice: - if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { - return false - } - if v1.IsNil() || v1.Len() == 0 { - return true + if equateNilAndEmpty { + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + + if v1.IsNil() || v1.Len() == 0 { + return true + } + } else { + if v1.IsNil() != v2.IsNil() { + return false + } + + // Optimize nil and empty cases + // Two lists that are BOTH nil are equal + // No need to check v2 is nil since v1.IsNil == v2.IsNil from above + if v1.IsNil() { + return true + } + + // Two lists that are both empty and both non nil are equal + if v1.Len() == 0 || v2.Len() == 0 { + return true + } } if v1.Len() != v2.Len() { return false @@ -169,7 +189,7 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, return true } for i := 0; i < v1.Len(); i++ { - if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, equateNilAndEmpty, depth+1) { return false } } @@ -178,22 +198,40 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, if v1.IsNil() || v2.IsNil() { return v1.IsNil() == v2.IsNil() } - return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, equateNilAndEmpty, depth+1) case reflect.Ptr: - return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, equateNilAndEmpty, depth+1) case reflect.Struct: for i, n := 0, v1.NumField(); i < n; i++ { - if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { + if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, equateNilAndEmpty, depth+1) { return false } } return true case reflect.Map: - if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { - return false - } - if v1.IsNil() || v1.Len() == 0 { - return true + if equateNilAndEmpty { + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + } else { + if 
v1.IsNil() != v2.IsNil() { + return false + } + + // Optimize nil and empty cases + // Two maps that are BOTH nil are equal + // No need to check v2 is nil since v1.IsNil == v2.IsNil from above + if v1.IsNil() { + return true + } + + // Two maps that are both empty and both non nil are equal + if v1.Len() == 0 || v2.Len() == 0 { + return true + } } if v1.Len() != v2.Len() { return false @@ -202,7 +240,7 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, return true } for _, k := range v1.MapKeys() { - if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, equateNilAndEmpty, depth+1) { return false } } @@ -232,6 +270,14 @@ func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, // Unexported field members cannot be compared and will cause an informative panic; you must add an Equality // function for these types. func (e Equalities) DeepEqual(a1, a2 interface{}) bool { + return e.deepEqual(a1, a2, true) +} + +func (e Equalities) DeepEqualWithNilDifferentFromEmpty(a1, a2 interface{}) bool { + return e.deepEqual(a1, a2, false) +} + +func (e Equalities) deepEqual(a1, a2 interface{}, equateNilAndEmpty bool) bool { if a1 == nil || a2 == nil { return a1 == a2 } @@ -240,7 +286,7 @@ func (e Equalities) DeepEqual(a1, a2 interface{}) bool { if v1.Type() != v2.Type() { return false } - return e.deepValueEqual(v1, v2, make(map[visit]bool), 0) + return e.deepValueEqual(v1, v2, make(map[visit]bool), equateNilAndEmpty, 0) } func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { @@ -327,7 +373,7 @@ func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool return true } return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) - case reflect.Ptr: + case reflect.Pointer: if v1.IsNil() { return true } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go new file mode 100644 index 000000000..0d50d44ac --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with +// apply. 
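The deep_equal.go change above threads an equateNilAndEmpty flag through deepValueEqual and exposes the stricter behaviour as DeepEqualWithNilDifferentFromEmpty. A small sketch of the difference, assuming the vendored forked reflect package is importable under an alias and that no custom equality functions need to be registered; the sample values are invented.

package main

import (
	"fmt"

	forkedreflect "k8s.io/apimachinery/third_party/forked/golang/reflect"
)

func main() {
	e := forkedreflect.Equalities{} // no custom equality funcs registered

	var nilSlice []string
	empty := []string{}

	// Historical behaviour: nil and empty collections compare as equal.
	fmt.Println(e.DeepEqual(nilSlice, empty)) // true

	// New entry point added above: nil and empty stay distinct.
	fmt.Println(e.DeepEqualWithNilDifferentFromEmpty(nilSlice, empty)) // false
}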
+func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go new file mode 100644 index 000000000..1f890bcfc --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with +// apply. +func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. 
+func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go new file mode 100644 index 000000000..d8a816f1e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go new file mode 100644 index 000000000..e8e371d7d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use +// with apply. +type MatchResourcesApplyConfiguration struct { + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` + ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` + MatchPolicy *apiadmissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` +} + +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with +// apply. +func MatchResources() *MatchResourcesApplyConfiguration { + return &MatchResourcesApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.ObjectSelector = value + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithExcludeResourceRules adds the given value to the ExcludeResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludeResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludeResourceRules") + } + b.ExcludeResourceRules = append(b.ExcludeResourceRules, *values[i]) + } + return b +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the MatchPolicy field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value apiadmissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration { + b.MatchPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index eba37bafd..cd8096f90 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -37,9 +37,10 @@ type MutatingWebhookApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} @@ -139,3 +140,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go index 7ae061e3c..58b71d6d5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. 
type MutatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct { Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithSelfLink(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values . return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithClusterName(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *MutatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -268,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go similarity index 57% rename from vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go rename to vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go index e072edb85..eda3bf635 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go @@ -16,29 +16,39 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
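The generated apply configurations above gain MatchConditions support on MutatingWebhook and a GetName accessor on MutatingWebhookConfiguration (while dropping the removed SelfLink/ClusterName setters). A short builder-style sketch, assuming the vendored k8s.io/client-go/applyconfigurations/admissionregistration/v1 package; the webhook name and CEL expression are invented.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

func main() {
	webhook := admissionregistrationv1.MutatingWebhook().
		WithName("example.webhook.invented.local").
		WithMatchConditions(
			admissionregistrationv1.MatchCondition().
				WithName("exclude-kube-system").
				WithExpression(`request.namespace != "kube-system"`),
		)

	cfg := admissionregistrationv1.MutatingWebhookConfiguration("example-config").
		WithWebhooks(webhook)

	// GetName (added above) reads the name back off the apply configuration
	// without reaching into ObjectMeta directly.
	fmt.Println(*cfg.GetName()) // example-config
}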
-package v1beta1 +package v1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. -type RuleWithOperationsApplyConfiguration struct { - Operations []v1beta1.OperationType `json:"operations,omitempty"` - RuleApplyConfiguration `json:",inline"` +type NamedRuleWithOperationsApplyConfiguration struct { + ResourceNames []string `json:"resourceNames,omitempty"` + RuleWithOperationsApplyConfiguration `json:",inline"` } -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. -func RuleWithOperations() *RuleWithOperationsApplyConfiguration { - return &RuleWithOperationsApplyConfiguration{} +func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { + return &NamedRuleWithOperationsApplyConfiguration{} +} + +// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceNames field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.ResourceNames = append(b.ResourceNames, values[i]) + } + return b } // WithOperations adds the given value to the Operations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Operations field. -func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1beta1.OperationType) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.Operations = append(b.Operations, values[i]) } @@ -48,7 +58,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1beta1. // WithAPIGroups adds the given value to the APIGroups field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.APIGroups = append(b.APIGroups, values[i]) } @@ -58,7 +68,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) * // WithAPIVersions adds the given value to the APIVersions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.APIVersions = append(b.APIVersions, values[i]) } @@ -68,7 +78,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) // WithResources adds the given value to the Resources field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { b.Resources = append(b.Resources, values[i]) } @@ -78,7 +88,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) * // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleWithOperationsApplyConfiguration { +func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { b.Scope = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go new file mode 100644 index 000000000..07577929a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use +// with apply. +type ParamKindApplyConfiguration struct { + APIVersion *string `json:"apiVersion,omitempty"` + Kind *string `json:"kind,omitempty"` +} + +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with +// apply. +func ParamKind() *ParamKindApplyConfiguration { + return &ParamKindApplyConfiguration{} +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithAPIVersion(value string) *ParamKindApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithKind(value string) *ParamKindApplyConfiguration { + b.Kind = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go new file mode 100644 index 000000000..73cda9b04 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use +// with apply. +type ParamRefApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` +} + +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with +// apply. +func ParamRef() *ParamRefApplyConfiguration { + return &ParamRefApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithName(value string) *ParamRefApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyConfiguration { + b.Namespace = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. 
+func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { + b.Selector = value + return b +} + +// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { + b.ParameterNotFoundAction = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go index 41d4179df..36a93643c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/admissionregistration/v1" ) -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use +// RuleApplyConfiguration represents a declarative configuration of the Rule type for use // with apply. type RuleApplyConfiguration struct { APIGroups []string `json:"apiGroups,omitempty"` @@ -31,7 +31,7 @@ type RuleApplyConfiguration struct { Scope *v1.ScopeType `json:"scope,omitempty"` } -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with +// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with // apply. func Rule() *RuleApplyConfiguration { return &RuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go index 59bbb8fe3..92bddd502 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/admissionregistration/v1" ) -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use +// RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use // with apply. type RuleWithOperationsApplyConfiguration struct { Operations []v1.OperationType `json:"operations,omitempty"` RuleApplyConfiguration `json:",inline"` } -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with +// RuleWithOperationsApplyConfiguration constructs a declarative configuration of the RuleWithOperations type for use with // apply. func RuleWithOperations() *RuleWithOperationsApplyConfiguration { return &RuleWithOperationsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go index 2cd55d9ea..239780664 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go new file mode 100644 index 000000000..723d10ecf --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use +// with apply. +type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. +func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go new file mode 100644 index 000000000..841209cae --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use +// with apply. +type ValidatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with +// apply. +func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b +} + +// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a +// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicy must be a unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") +} + +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicy.Name) + + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicySpecApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..1acad056f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// with apply. +type ValidatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// apply. +func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b +} + +// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a +// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. 
+// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicyBinding.Name) + + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go new file mode 100644 index 000000000..eb426af42 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" +) + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// with apply. +type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1.ValidationAction `json:"validationActions,omitempty"` +} + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// apply. +func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the PolicyName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go new file mode 100644 index 000000000..1635b30a6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go @@ -0,0 +1,117 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" +) + +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use +// with apply. 
+type ValidatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Validations []ValidationApplyConfiguration `json:"validations,omitempty"` + FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` +} + +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// apply. +func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { + return &ValidatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithValidations adds the given value to the Validations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Validations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithValidations") + } + b.Validations = append(b.Validations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1.FailurePolicyType) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. 
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go new file mode 100644 index 000000000..e6f4e8459 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. 
+func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index d0691de10..a2c705eb5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -36,9 +36,10 @@ type ValidatingWebhookApplyConfiguration struct { SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. 
func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} @@ -130,3 +131,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go index ae19ed81e..0d1a6c81a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. type ValidatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct { Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithSelfLink(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -241,15 +232,6 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithClusterName(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ValidatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -268,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go new file mode 100644 index 000000000..2a828b6b4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use +// with apply. +type ValidationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with +// apply. +func Validation() *ValidationApplyConfiguration { + return &ValidationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithExpression(value string) *ValidationApplyConfiguration { + b.Expression = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationApplyConfiguration { + b.Message = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go new file mode 100644 index 000000000..9dd20afa7 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use +// with apply. +type VariableApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with +// apply. +func Variable() *VariableApplyConfiguration { + return &VariableApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. 
+func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go index aa358ae20..77f2227b9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go new file mode 100644 index 000000000..958a53740 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with +// apply. +func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. 
+func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go new file mode 100644 index 000000000..f36c2f0f5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with +// apply. +func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go new file mode 100644 index 000000000..7f983dcb2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go new file mode 100644 index 000000000..e443535b6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use +// with apply. 
+type MatchResourcesApplyConfiguration struct { + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` + ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` + MatchPolicy *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"` +} + +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with +// apply. +func MatchResources() *MatchResourcesApplyConfiguration { + return &MatchResourcesApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.ObjectSelector = value + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithExcludeResourceRules adds the given value to the ExcludeResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludeResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludeResourceRules") + } + b.ExcludeResourceRules = append(b.ExcludeResourceRules, *values[i]) + } + return b +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchPolicy field is set to the value of the last call. 
+func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1alpha1.MatchPolicyType) *MatchResourcesApplyConfiguration { + b.MatchPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go new file mode 100644 index 000000000..5e6744fd7 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" +) + +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use +// with apply. +type NamedRuleWithOperationsApplyConfiguration struct { + ResourceNames []string `json:"resourceNames,omitempty"` + v1.RuleWithOperationsApplyConfiguration `json:",inline"` +} + +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with +// apply. +func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { + return &NamedRuleWithOperationsApplyConfiguration{} +} + +// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceNames field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.ResourceNames = append(b.ResourceNames, values[i]) + } + return b +} + +// WithOperations adds the given value to the Operations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Operations field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Operations = append(b.Operations, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. 
+func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIVersions field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIVersions = append(b.APIVersions, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { + b.Scope = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go new file mode 100644 index 000000000..daf17fb24 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use +// with apply. +type ParamKindApplyConfiguration struct { + APIVersion *string `json:"apiVersion,omitempty"` + Kind *string `json:"kind,omitempty"` +} + +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with +// apply. +func ParamKind() *ParamKindApplyConfiguration { + return &ParamKindApplyConfiguration{} +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ParamKindApplyConfiguration) WithAPIVersion(value string) *ParamKindApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithKind(value string) *ParamKindApplyConfiguration { + b.Kind = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go new file mode 100644 index 000000000..c4fff1d47 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use +// with apply. +type ParamRefApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` +} + +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with +// apply. +func ParamRef() *ParamRefApplyConfiguration { + return &ParamRefApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithName(value string) *ParamRefApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyConfiguration { + b.Namespace = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. 
+func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { + b.Selector = value + return b +} + +// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { + b.ParameterNotFoundAction = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go new file mode 100644 index 000000000..d1a7fff50 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use +// with apply. +type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. +func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go new file mode 100644 index 000000000..fe60eb5f2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use +// with apply. +type ValidatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with +// apply. +func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a +// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicy must be a unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") +} + +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicy.Name) + + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicySpecApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..0c11ee594 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// with apply. +type ValidatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// apply. +func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a +// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. 
+// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicyBinding.Name) + + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go new file mode 100644 index 000000000..0f8e4e435 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// with apply. 
+type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` +} + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// apply. +func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1alpha1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go new file mode 100644 index 000000000..d5d352994 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -0,0 +1,117 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use +// with apply. +type ValidatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Validations []ValidationApplyConfiguration `json:"validations,omitempty"` + FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` +} + +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// apply. +func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { + return &ValidatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithValidations adds the given value to the Validations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Validations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithValidations") + } + b.Validations = append(b.Validations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go new file mode 100644 index 000000000..2fec5ba47 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. +func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go new file mode 100644 index 000000000..5f7304373 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use +// with apply. +type ValidationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with +// apply. +func Validation() *ValidationApplyConfiguration { + return &ValidationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithExpression(value string) *ValidationApplyConfiguration { + b.Expression = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationApplyConfiguration { + b.Message = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go new file mode 100644 index 000000000..0459dae65 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use +// with apply. 
+type VariableApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with +// apply. +func Variable() *VariableApplyConfiguration { + return &VariableApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go new file mode 100644 index 000000000..8718db944 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use +// with apply. +type AuditAnnotationApplyConfiguration struct { + Key *string `json:"key,omitempty"` + ValueExpression *string `json:"valueExpression,omitempty"` +} + +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with +// apply. +func AuditAnnotation() *AuditAnnotationApplyConfiguration { + return &AuditAnnotationApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration { + b.Key = &value + return b +} + +// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ValueExpression field is set to the value of the last call. 
+func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration { + b.ValueExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go new file mode 100644 index 000000000..66cfc8cdc --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use +// with apply. +type ExpressionWarningApplyConfiguration struct { + FieldRef *string `json:"fieldRef,omitempty"` + Warning *string `json:"warning,omitempty"` +} + +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with +// apply. +func ExpressionWarning() *ExpressionWarningApplyConfiguration { + return &ExpressionWarningApplyConfiguration{} +} + +// WithFieldRef sets the FieldRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FieldRef field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration { + b.FieldRef = &value + return b +} + +// WithWarning sets the Warning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Warning field is set to the value of the last call. +func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration { + b.Warning = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go new file mode 100644 index 000000000..63db7fc80 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use +// with apply. +type MatchConditionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with +// apply. +func MatchCondition() *MatchConditionApplyConfiguration { + return &MatchConditionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithName(value string) *MatchConditionApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *MatchConditionApplyConfiguration) WithExpression(value string) *MatchConditionApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go new file mode 100644 index 000000000..4005e55a3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use +// with apply. +type MatchResourcesApplyConfiguration struct { + NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` + ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` + MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` +} + +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with +// apply. 
+func MatchResources() *MatchResourcesApplyConfiguration { + return &MatchResourcesApplyConfiguration{} +} + +// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NamespaceSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.NamespaceSelector = value + return b +} + +// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObjectSelector field is set to the value of the last call. +func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { + b.ObjectSelector = value + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithExcludeResourceRules adds the given value to the ExcludeResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludeResourceRules field. +func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExcludeResourceRules") + } + b.ExcludeResourceRules = append(b.ExcludeResourceRules, *values[i]) + } + return b +} + +// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchPolicy field is set to the value of the last call. 
+func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1beta1.MatchPolicyType) *MatchResourcesApplyConfiguration { + b.MatchPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index ddb728aff..b2ab76aef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -20,26 +20,28 @@ package v1beta1 import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"` - Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` + Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"` MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` ReinvocationPolicy *admissionregistrationv1beta1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} @@ -64,7 +66,7 @@ func (b *MutatingWebhookApplyConfiguration) WithClientConfig(value *WebhookClien // WithRules adds the given value to the Rules field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Rules field. 
-func (b *MutatingWebhookApplyConfiguration) WithRules(values ...*RuleWithOperationsApplyConfiguration) *MutatingWebhookApplyConfiguration { +func (b *MutatingWebhookApplyConfiguration) WithRules(values ...*v1.RuleWithOperationsApplyConfiguration) *MutatingWebhookApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithRules") @@ -93,7 +95,7 @@ func (b *MutatingWebhookApplyConfiguration) WithMatchPolicy(value admissionregis // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *MutatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { +func (b *MutatingWebhookApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { b.NamespaceSelector = value return b } @@ -101,7 +103,7 @@ func (b *MutatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.Labe // WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObjectSelector field is set to the value of the last call. -func (b *MutatingWebhookApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { +func (b *MutatingWebhookApplyConfiguration) WithObjectSelector(value *metav1.LabelSelectorApplyConfiguration) *MutatingWebhookApplyConfiguration { b.ObjectSelector = value return b } @@ -139,3 +141,16 @@ func (b *MutatingWebhookApplyConfiguration) WithReinvocationPolicy(value admissi b.ReinvocationPolicy = &value return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *MutatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 178745c23..51bb82389 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. 
type MutatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct { Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithSelfLink(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values . return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithClusterName(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *MutatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -268,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go new file mode 100644 index 000000000..5de70c7ad --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" +) + +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use +// with apply. +type NamedRuleWithOperationsApplyConfiguration struct { + ResourceNames []string `json:"resourceNames,omitempty"` + v1.RuleWithOperationsApplyConfiguration `json:",inline"` +} + +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with +// apply. +func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { + return &NamedRuleWithOperationsApplyConfiguration{} +} + +// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceNames field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.ResourceNames = append(b.ResourceNames, values[i]) + } + return b +} + +// WithOperations adds the given value to the Operations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Operations field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Operations = append(b.Operations, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIVersions field. 
+func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.APIVersions = append(b.APIVersions, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithScope sets the Scope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scope field is set to the value of the last call. +func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { + b.Scope = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go new file mode 100644 index 000000000..398312528 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use +// with apply. +type ParamKindApplyConfiguration struct { + APIVersion *string `json:"apiVersion,omitempty"` + Kind *string `json:"kind,omitempty"` +} + +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with +// apply. +func ParamKind() *ParamKindApplyConfiguration { + return &ParamKindApplyConfiguration{} +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ParamKindApplyConfiguration) WithAPIVersion(value string) *ParamKindApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ParamKindApplyConfiguration) WithKind(value string) *ParamKindApplyConfiguration { + b.Kind = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go new file mode 100644 index 000000000..0a94ae067 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use +// with apply. +type ParamRefApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` +} + +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with +// apply. +func ParamRef() *ParamRefApplyConfiguration { + return &ParamRefApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithName(value string) *ParamRefApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyConfiguration { + b.Namespace = &value + return b +} + +// WithSelector sets the Selector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Selector field is set to the value of the last call. +func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { + b.Selector = value + return b +} + +// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. 
+func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { + b.ParameterNotFoundAction = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go deleted file mode 100644 index 21151b998..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/rule.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" -) - -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use -// with apply. -type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1beta1.ScopeType `json:"scope,omitempty"` -} - -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with -// apply. -func Rule() *RuleApplyConfiguration { - return &RuleApplyConfiguration{} -} - -// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIGroups field. -func (b *RuleApplyConfiguration) WithAPIGroups(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) - } - return b -} - -// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the APIVersions field. -func (b *RuleApplyConfiguration) WithAPIVersions(values ...string) *RuleApplyConfiguration { - for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) - } - return b -} - -// WithResources adds the given value to the Resources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Resources field. -func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfiguration { - for i := range values { - b.Resources = append(b.Resources, values[i]) - } - return b -} - -// WithScope sets the Scope field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Scope field is set to the value of the last call. 
-func (b *RuleApplyConfiguration) WithScope(value v1beta1.ScopeType) *RuleApplyConfiguration { - b.Scope = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go index c21b57490..70cc6b5b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go new file mode 100644 index 000000000..cea6e11de --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use +// with apply. +type TypeCheckingApplyConfiguration struct { + ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` +} + +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with +// apply. +func TypeChecking() *TypeCheckingApplyConfiguration { + return &TypeCheckingApplyConfiguration{} +} + +// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field. 
+func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithExpressionWarnings") + } + b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go new file mode 100644 index 000000000..c29ee56cb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use +// with apply. +type ValidatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` +} + +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with +// apply. +func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b +} + +// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a +// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicy must be a unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. 
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") +} + +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicy.Name) + + b.WithKind("ValidatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicySpecApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..4347c4810 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// with apply. +type ValidatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// apply. +func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b +} + +// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// validatingAdmissionPolicyBinding. 
If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a +// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. +// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(validatingAdmissionPolicyBinding.Name) + + b.WithKind("ValidatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go new file mode 100644 index 000000000..bddc3a40c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" +) + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// with apply. 
+type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` + ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"` +} + +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// apply. +func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyName field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} + +// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ValidationActions field. +func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1beta1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { + for i := range values { + b.ValidationActions = append(b.ValidationActions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go new file mode 100644 index 000000000..8b235337d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go @@ -0,0 +1,117 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" +) + +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use +// with apply. +type ValidatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Validations []ValidationApplyConfiguration `json:"validations,omitempty"` + FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"` + AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` +} + +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// apply. +func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { + return &ValidatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithValidations adds the given value to the Validations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Validations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithValidations") + } + b.Validations = append(b.Validations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1beta1.FailurePolicyType) *ValidatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AuditAnnotations field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAuditAnnotations") + } + b.AuditAnnotations = append(b.AuditAnnotations, *values[i]) + } + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go new file mode 100644 index 000000000..4612af0cf --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// with apply. +type ValidatingAdmissionPolicyStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// apply. +func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { + return &ValidatingAdmissionPolicyStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TypeChecking field is set to the value of the last call. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + b.TypeChecking = value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 8ca0e9cbe..1e107d68f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -20,25 +20,27 @@ package v1beta1 import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. 
type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"` - Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` + Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"` FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"` MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"` TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} @@ -63,7 +65,7 @@ func (b *ValidatingWebhookApplyConfiguration) WithClientConfig(value *WebhookCli // WithRules adds the given value to the Rules field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Rules field. -func (b *ValidatingWebhookApplyConfiguration) WithRules(values ...*RuleWithOperationsApplyConfiguration) *ValidatingWebhookApplyConfiguration { +func (b *ValidatingWebhookApplyConfiguration) WithRules(values ...*v1.RuleWithOperationsApplyConfiguration) *ValidatingWebhookApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithRules") @@ -92,7 +94,7 @@ func (b *ValidatingWebhookApplyConfiguration) WithMatchPolicy(value admissionreg // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *ValidatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { +func (b *ValidatingWebhookApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { b.NamespaceSelector = value return b } @@ -100,7 +102,7 @@ func (b *ValidatingWebhookApplyConfiguration) WithNamespaceSelector(value *v1.La // WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObjectSelector field is set to the value of the last call. 
-func (b *ValidatingWebhookApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { +func (b *ValidatingWebhookApplyConfiguration) WithObjectSelector(value *metav1.LabelSelectorApplyConfiguration) *ValidatingWebhookApplyConfiguration { b.ObjectSelector = value return b } @@ -130,3 +132,16 @@ func (b *ValidatingWebhookApplyConfiguration) WithAdmissionReviewVersions(values } return b } + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *ValidatingWebhookApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingWebhookApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go index e60d997f8..c3535c180 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. type ValidatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct { Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithSelfLink(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -241,15 +232,6 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithClusterName(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ValidatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -268,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go new file mode 100644 index 000000000..019e8e7aa --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use +// with apply. +type ValidationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *v1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` +} + +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with +// apply. +func Validation() *ValidationApplyConfiguration { + return &ValidationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithExpression(value string) *ValidationApplyConfiguration { + b.Expression = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Message field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationApplyConfiguration { + b.Message = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MessageExpression field is set to the value of the last call. +func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration { + b.MessageExpression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go new file mode 100644 index 000000000..0ece197db --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use +// with apply. +type VariableApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Expression *string `json:"expression,omitempty"` +} + +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with +// apply. +func Variable() *VariableApplyConfiguration { + return &VariableApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration { + b.Name = &value + return b +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. 
+func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go index 490f9d5f3..76ff71b4a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go index d36f7603c..8394298b9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go @@ -18,15 +18,16 @@ limitations under the License. package v1alpha1 -// ServerStorageVersionApplyConfiguration represents an declarative configuration of the ServerStorageVersion type for use +// ServerStorageVersionApplyConfiguration represents a declarative configuration of the ServerStorageVersion type for use // with apply. type ServerStorageVersionApplyConfiguration struct { APIServerID *string `json:"apiServerID,omitempty"` EncodingVersion *string `json:"encodingVersion,omitempty"` DecodableVersions []string `json:"decodableVersions,omitempty"` + ServedVersions []string `json:"servedVersions,omitempty"` } -// ServerStorageVersionApplyConfiguration constructs an declarative configuration of the ServerStorageVersion type for use with +// ServerStorageVersionApplyConfiguration constructs a declarative configuration of the ServerStorageVersion type for use with // apply. func ServerStorageVersion() *ServerStorageVersionApplyConfiguration { return &ServerStorageVersionApplyConfiguration{} @@ -57,3 +58,13 @@ func (b *ServerStorageVersionApplyConfiguration) WithDecodableVersions(values .. } return b } + +// WithServedVersions adds the given value to the ServedVersions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ServedVersions field. 
+func (b *ServerStorageVersionApplyConfiguration) WithServedVersions(values ...string) *ServerStorageVersionApplyConfiguration { + for i := range values { + b.ServedVersions = append(b.ServedVersions, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go index 180b77625..d734328b0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageVersionApplyConfiguration represents an declarative configuration of the StorageVersion type for use +// StorageVersionApplyConfiguration represents a declarative configuration of the StorageVersion type for use // with apply. type StorageVersionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StorageVersionApplyConfiguration struct { Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"` } -// StorageVersion constructs an declarative configuration of the StorageVersion type for use with +// StorageVersion constructs a declarative configuration of the StorageVersion type for use with // apply. func StorageVersion(name string) *StorageVersionApplyConfiguration { b := &StorageVersionApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *StorageVersionApplyConfiguration) WithNamespace(value string) *StorageV return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StorageVersionApplyConfiguration) WithSelfLink(value string) *StorageVersionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *StorageVersionApplyConfiguration) WithFinalizers(values ...string) *Sto return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *StorageVersionApplyConfiguration) WithClusterName(value string) *StorageVersionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *StorageVersionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *StorageVersionApplyConfiguration) WithStatus(value *StorageVersionStatu b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *StorageVersionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go index 75b625647..68d894d0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StorageVersionConditionApplyConfiguration represents an declarative configuration of the StorageVersionCondition type for use +// StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use // with apply. type StorageVersionConditionApplyConfiguration struct { Type *v1alpha1.StorageVersionConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StorageVersionConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StorageVersionConditionApplyConfiguration constructs an declarative configuration of the StorageVersionCondition type for use with +// StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with // apply. func StorageVersionCondition() *StorageVersionConditionApplyConfiguration { return &StorageVersionConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go index 43b0bf71b..2e25d6752 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// StorageVersionStatusApplyConfiguration represents an declarative configuration of the StorageVersionStatus type for use +// StorageVersionStatusApplyConfiguration represents a declarative configuration of the StorageVersionStatus type for use // with apply. type StorageVersionStatusApplyConfiguration struct { StorageVersions []ServerStorageVersionApplyConfiguration `json:"storageVersions,omitempty"` @@ -26,7 +26,7 @@ type StorageVersionStatusApplyConfiguration struct { Conditions []StorageVersionConditionApplyConfiguration `json:"conditions,omitempty"` } -// StorageVersionStatusApplyConfiguration constructs an declarative configuration of the StorageVersionStatus type for use with +// StorageVersionStatusApplyConfiguration constructs a declarative configuration of the StorageVersionStatus type for use with // apply. 
func StorageVersionStatus() *StorageVersionStatusApplyConfiguration { return &StorageVersionStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go index 28a0c582b..25b645059 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ControllerRevisionApplyConfiguration) WithSelfLink(value string) *ControllerRevisionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ControllerRevisionApplyConfiguration) WithClusterName(value string) *ControllerRevisionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ControllerRevisionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -275,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go index 6dd8c6e88..a15785651 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *DaemonSetApplyConfiguration) WithSelfLink(value string) *DaemonSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSe return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *DaemonSetApplyConfiguration) WithClusterName(value string) *DaemonSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *DaemonSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go index 283ae10a2..de91745b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go index 5e808874b..99dc5abae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go index d1c4462aa..a40dc1651 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. 
type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go index f1ba18226..15af4e66b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go index d33321c52..52b7a21b7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. 
-func (b *DeploymentApplyConfiguration) WithSelfLink(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *Deploym return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithClusterName(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *DeploymentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go index 774704413..84df752bc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go index 812253dae..063f1c276 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. 
type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go index 7b48b4255..747813ade 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go index e9571edab..dc4b97c55 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. 
func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go index 0affbf82f..35ca4e4df 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithSelfLink(value string) *ReplicaSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *Replica return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithClusterName(value string) *ReplicaSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ReplicaSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go index 19b0355d1..32da80842 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go index ca3286583..039058486 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go index 12f41490f..a1408ae25 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. 
type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go index ebe8e86d1..e898f5081 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go index ca9daaf24..2bc293724 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go index 2090d88ed..dd0de81a6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go @@ -18,13 +18,18 @@ limitations under the License. package v1 -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { - Partition *int32 `json:"partition,omitempty"` + Partition *int32 `json:"partition,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} @@ -37,3 +42,11 @@ func (b *RollingUpdateStatefulSetStrategyApplyConfiguration) WithPartition(value b.Partition = &value return b } + +// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxUnavailable field is set to the value of the last call. +func (b *RollingUpdateStatefulSetStrategyApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *RollingUpdateStatefulSetStrategyApplyConfiguration { + b.MaxUnavailable = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go index 7cb5ec12c..6f2b340da 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. 
func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StatefulSetApplyConfiguration) WithSelfLink(value string) *StatefulSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *Statef return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *StatefulSetApplyConfiguration) WithClusterName(value string) *StatefulSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *StatefulSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go index f9d47850d..c62a5e854 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. 
func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go new file mode 100644 index 000000000..86f39e16c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use +// with apply. +type StatefulSetOrdinalsApplyConfiguration struct { + Start *int32 `json:"start,omitempty"` +} + +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with +// apply. +func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { + return &StatefulSetOrdinalsApplyConfiguration{} +} + +// WithStart sets the Start field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Start field is set to the value of the last call. +func (b *StatefulSetOrdinalsApplyConfiguration) WithStart(value int32) *StatefulSetOrdinalsApplyConfiguration { + b.Start = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go index ba01d5d3c..cd65fd436 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. 
func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index ee0ed40a5..1848a963c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,9 +37,10 @@ type StatefulSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} @@ -129,3 +130,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPo b.PersistentVolumeClaimRetentionPolicy = value return b } + +// WithOrdinals sets the Ordinals field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ordinals field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithOrdinals(value *StatefulSetOrdinalsApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.Ordinals = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go index d88881b65..637a1c649 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. 
func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go index 5268a1e06..b59e10735 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/apps/v1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go index bcdfa44b2..606de58a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ControllerRevisionApplyConfiguration) WithSelfLink(value string) *ControllerRevisionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ControllerRevisionApplyConfiguration) WithClusterName(value string) *ControllerRevisionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ControllerRevisionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -275,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go index eddab1789..145aaed70 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithSelfLink(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -244,15 +235,6 @@ func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *Deploym return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithClusterName(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *DeploymentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go index 9da8ce089..504dddd94 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. type DeploymentConditionApplyConfiguration struct { Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go index 5e18476bd..5531c756f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. 
type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go index f8d1cf5d2..adc023a34 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go index 7279318a8..2c322b4ac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go index 131e57a39..775f82eef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go index dde5f064b..244701a5e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go index 64273f618..94c297134 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go @@ -18,13 +18,18 @@ limitations under the License. package v1beta1 -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { - Partition *int32 `json:"partition,omitempty"` + Partition *int32 `json:"partition,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. 
func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} @@ -37,3 +42,11 @@ func (b *RollingUpdateStatefulSetStrategyApplyConfiguration) WithPartition(value b.Partition = &value return b } + +// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxUnavailable field is set to the value of the last call. +func (b *RollingUpdateStatefulSetStrategyApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *RollingUpdateStatefulSetStrategyApplyConfiguration { + b.MaxUnavailable = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go index a4f64cd85..270593886 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StatefulSetApplyConfiguration) WithSelfLink(value string) *StatefulSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *Statef return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *StatefulSetApplyConfiguration) WithClusterName(value string) *StatefulSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *StatefulSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go index 97e994ab7..8a17391cd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1beta1.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go rename to vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go index 30c3724cf..2e3049e5e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedflexvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go @@ -18,22 +18,22 @@ limitations under the License. package v1beta1 -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. -type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` +type StatefulSetOrdinalsApplyConfiguration struct { + Start *int32 `json:"start,omitempty"` } -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. 
-func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} +func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { + return &StatefulSetOrdinalsApplyConfiguration{} } -// WithDriver sets the Driver field in the declarative configuration to the given value +// WithStart sets the Start field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value +// If called multiple times, the Start field is set to the value of the last call. +func (b *StatefulSetOrdinalsApplyConfiguration) WithStart(value int32) *StatefulSetOrdinalsApplyConfiguration { + b.Start = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go index 0048724c0..69a8ee0f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index 886433d9e..ac325d717 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. 
type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,9 +37,10 @@ type StatefulSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} @@ -129,3 +130,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPo b.PersistentVolumeClaimRetentionPolicy = value return b } + +// WithOrdinals sets the Ordinals field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ordinals field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithOrdinals(value *StatefulSetOrdinalsApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.Ordinals = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go index f31066b6f..27ae7540f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go index 895c1e7f8..7714ebbb7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. 
type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go index f5d906162..5f75a4551 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ControllerRevisionApplyConfiguration) WithSelfLink(value string) *ControllerRevisionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *ControllerRevisionApplyConfiguration) WithClusterName(value string) *ControllerRevisionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ControllerRevisionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -275,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go index 92708f0a8..9ffda6182 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *DaemonSetApplyConfiguration) WithSelfLink(value string) *DaemonSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSe return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *DaemonSetApplyConfiguration) WithClusterName(value string) *DaemonSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *DaemonSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go index 55dc1f487..8315050f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1beta2.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go index 48137819a..74d8bf51c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. 
func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go index 29cda7a90..6b0fda895 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go index 07fc07fc6..7d66f1da4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go index ad0c509db..485da788a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. 
type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithSelfLink(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *Deploym return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithClusterName(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *DeploymentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go index 852a2c683..192427874 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
type DeploymentConditionApplyConfiguration struct { Type *v1beta2.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go index 6898941ac..1b55130c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go index fe99ca991..5fa912233 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. 
func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go index 8714e153e..c769436ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta2.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go index e2998f2c3..d8608aa51 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithSelfLink(value string) *ReplicaSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -244,15 +235,6 @@ func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *Replica return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithClusterName(value string) *ReplicaSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ReplicaSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go index 47776bfa2..beec546f7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1beta2.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go index 14d548169..1d77b9e0f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. 
type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go index 7c1b8fb29..d3c92e274 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go index b586b678d..ad6021d37 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. 
func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go index 78ef21008..b0cc3a4ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go index f828ef70d..0046c264b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go @@ -18,13 +18,18 @@ limitations under the License. package v1beta2 -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { - Partition *int32 `json:"partition,omitempty"` + Partition *int32 `json:"partition,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} @@ -37,3 +42,11 @@ func (b *RollingUpdateStatefulSetStrategyApplyConfiguration) WithPartition(value b.Partition = &value return b } + +// WithMaxUnavailable sets the MaxUnavailable field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxUnavailable field is set to the value of the last call. 
+func (b *RollingUpdateStatefulSetStrategyApplyConfiguration) WithMaxUnavailable(value intstr.IntOrString) *RollingUpdateStatefulSetStrategyApplyConfiguration { + b.MaxUnavailable = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go index d4901edf9..126ab2d8b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -34,10 +34,13 @@ type ScaleApplyConfiguration struct { Status *v1beta2.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { - return &ScaleApplyConfiguration{} + b := &ScaleApplyConfiguration{} + b.WithKind("Scale") + b.WithAPIVersion("apps/v1beta2") + return b } // WithKind sets the Kind field in the declarative configuration to the given value @@ -83,15 +86,6 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -201,15 +195,6 @@ func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -231,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleAp b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go index 0a242310c..3d2b5d191 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StatefulSetApplyConfiguration) WithSelfLink(value string) *StatefulSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *Statef return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *StatefulSetApplyConfiguration) WithClusterName(value string) *StatefulSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *StatefulSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go index c33e68b5e..aa45db686 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { Type *v1beta2.StatefulSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type StatefulSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go new file mode 100644 index 000000000..a899243a5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use +// with apply. +type StatefulSetOrdinalsApplyConfiguration struct { + Start *int32 `json:"start,omitempty"` +} + +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with +// apply. +func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { + return &StatefulSetOrdinalsApplyConfiguration{} +} + +// WithStart sets the Start field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Start field is set to the value of the last call. 
+func (b *StatefulSetOrdinalsApplyConfiguration) WithStart(value int32) *StatefulSetOrdinalsApplyConfiguration { + b.Start = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go index aee27803d..318e5f464 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index 08922cea5..bebf80c89 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,9 +37,10 @@ type StatefulSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"` + Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. 
func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} @@ -129,3 +130,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithPersistentVolumeClaimRetentionPo b.PersistentVolumeClaimRetentionPolicy = value return b } + +// WithOrdinals sets the Ordinals field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ordinals field is set to the value of the last call. +func (b *StatefulSetSpecApplyConfiguration) WithOrdinals(value *StatefulSetOrdinalsApplyConfiguration) *StatefulSetSpecApplyConfiguration { + b.Ordinals = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go index 63835904c..a647cd7d2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go index 03c291491..81d4ba1df 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { Type *v1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. 
func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go index 0eac22692..51ec66501 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go index bf04b0131..8150635ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. 
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithSelfLink(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...str return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *HorizontalPodAutoscalerApplyConfiguration) WithClusterName(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go index 561ac60d3..0ca2f84ea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. 
func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go index abc2e05aa..fcb231c3b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -32,7 +32,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go index 2d2cfeb97..40f3db8c5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -33,10 +33,13 @@ type ScaleApplyConfiguration struct { Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { - return &ScaleApplyConfiguration{} + b := &ScaleApplyConfiguration{} + b.WithKind("Scale") + b.WithAPIVersion("autoscaling/v1") + return b } // WithKind sets the Kind field in the declarative configuration to the given value @@ -82,15 +85,6 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. 
-func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -200,15 +194,6 @@ func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -230,3 +215,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguratio b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go index 2339a8fef..025004ba5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScaleSpecApplyConfiguration represents an declarative configuration of the ScaleSpec type for use +// ScaleSpecApplyConfiguration represents a declarative configuration of the ScaleSpec type for use // with apply. type ScaleSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` } -// ScaleSpecApplyConfiguration constructs an declarative configuration of the ScaleSpec type for use with +// ScaleSpecApplyConfiguration constructs a declarative configuration of the ScaleSpec type for use with // apply. func ScaleSpec() *ScaleSpecApplyConfiguration { return &ScaleSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go index 81c8d1b30..51f96d235 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ScaleStatusApplyConfiguration represents an declarative configuration of the ScaleStatus type for use +// ScaleStatusApplyConfiguration represents a declarative configuration of the ScaleStatus type for use // with apply. 
type ScaleStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` Selector *string `json:"selector,omitempty"` } -// ScaleStatusApplyConfiguration constructs an declarative configuration of the ScaleStatus type for use with +// ScaleStatusApplyConfiguration constructs a declarative configuration of the ScaleStatus type for use with // apply. func ScaleStatus() *ScaleStatusApplyConfiguration { return &ScaleStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go index 15ef216d1..b6e071e84 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go index 34213bca3..46bd2bac2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. 
func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go index 19045706d..645f09857 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go index 11a8eff26..a9c45b31a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go index 3b1a0329b..4280086f5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. 
package v2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go index af805488e..e26b530c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *HorizontalPodAutoscalerApplyConfiguration) WithSelfLink(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...str return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithClusterName(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go index e6fdabd7c..05750cc21 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go index c020eccd3..844c6dc86 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. 
type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go index c36bc3f22..e34ababc5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go index d4d551df8..f1a2c3f4e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. 
type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go index 139f0fb5c..b8b735747 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { Type *v2.HPAScalingPolicyType `json:"type,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct { PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go index e768076aa..c7020f77b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct { Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. 
func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go index 312ad3ddd..2f99f7d0b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go index 094ead6c1..89e6b5c68 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. type MetricSpecApplyConfiguration struct { Type *v2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go index c65ad446f..86ae3348b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go @@ -22,7 +22,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. 
type MetricStatusApplyConfiguration struct { Type *v2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go index f301e4d2b..bf68a1c34 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { Type *v2.MetricTargetType `json:"type,omitempty"` @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go index e8474b189..59732548b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go index a9482565e..2391fa5c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go index 70ba43bed..9ffd0c180 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go deleted file mode 100644 index 86601cc48..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podresourcemetricsource.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v2 - -import ( - v1 "k8s.io/api/core/v1" -) - -// PodResourceMetricSourceApplyConfiguration represents an declarative configuration of the PodResourceMetricSource type for use -// with apply. 
-type PodResourceMetricSourceApplyConfiguration struct {
-	Name   *v1.ResourceName                `json:"name,omitempty"`
-	Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
-}
-
-// PodResourceMetricSourceApplyConfiguration constructs an declarative configuration of the PodResourceMetricSource type for use with
-// apply.
-func PodResourceMetricSource() *PodResourceMetricSourceApplyConfiguration {
-	return &PodResourceMetricSourceApplyConfiguration{}
-}
-
-// WithName sets the Name field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Name field is set to the value of the last call.
-func (b *PodResourceMetricSourceApplyConfiguration) WithName(value v1.ResourceName) *PodResourceMetricSourceApplyConfiguration {
-	b.Name = &value
-	return b
-}
-
-// WithTarget sets the Target field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Target field is set to the value of the last call.
-func (b *PodResourceMetricSourceApplyConfiguration) WithTarget(value *MetricTargetApplyConfiguration) *PodResourceMetricSourceApplyConfiguration {
-	b.Target = value
-	return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
index 0a7a5c259..28a35a2ae 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
@@ -18,14 +18,14 @@ limitations under the License.
 
 package v2
 
-// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use
+// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use
 // with apply.
 type PodsMetricSourceApplyConfiguration struct {
 	Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
 	Target *MetricTargetApplyConfiguration     `json:"target,omitempty"`
 }
 
-// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with
+// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with
 // apply.
 func PodsMetricSource() *PodsMetricSourceApplyConfiguration {
 	return &PodsMetricSourceApplyConfiguration{}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
index 865fcc33e..4614282ce 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
@@ -18,14 +18,14 @@ limitations under the License.
 
 package v2
 
-// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use
+// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use
 // with apply.
 type PodsMetricStatusApplyConfiguration struct {
 	Metric  *MetricIdentifierApplyConfiguration  `json:"metric,omitempty"`
 	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
 }
 
-// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with
+// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with
 // apply.
 func PodsMetricStatus() *PodsMetricStatusApplyConfiguration {
 	return &PodsMetricStatusApplyConfiguration{}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
index 25a065fef..ffc9042b9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
@@ -22,14 +22,14 @@ import (
 	v1 "k8s.io/api/core/v1"
 )
 
-// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use
+// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use
 // with apply.
 type ResourceMetricSourceApplyConfiguration struct {
 	Name   *v1.ResourceName                `json:"name,omitempty"`
 	Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
 }
 
-// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with
+// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with
 // apply.
 func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration {
 	return &ResourceMetricSourceApplyConfiguration{}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
index fb5625afa..0fdbfcb55 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
@@ -22,14 +22,14 @@ import (
 	v1 "k8s.io/api/core/v1"
 )
 
-// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use
+// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use
 // with apply.
 type ResourceMetricStatusApplyConfiguration struct {
 	Name    *v1.ResourceName                     `json:"name,omitempty"`
 	Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
 }
 
-// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with
+// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with
 // apply.
 func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration {
 	return &ResourceMetricStatusApplyConfiguration{}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
index 2594e8e07..f41c5af10 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
@@ -23,7 +23,7 @@ import (
 	resource "k8s.io/apimachinery/pkg/api/resource"
 )
 
-// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use
+// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use
 // with apply.
 type ContainerResourceMetricSourceApplyConfiguration struct {
 	Name *v1.ResourceName `json:"name,omitempty"`
@@ -32,7 +32,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct {
 	Container *string `json:"container,omitempty"`
 }
 
-// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with
+// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with
 // apply.
 func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration {
 	return &ContainerResourceMetricSourceApplyConfiguration{}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
index ae897237c..4cd56eea3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
@@ -23,7 +23,7 @@ import (
 	resource "k8s.io/apimachinery/pkg/api/resource"
 )
 
-// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use
+// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use
 // with apply.
 type ContainerResourceMetricStatusApplyConfiguration struct {
 	Name *v1.ResourceName `json:"name,omitempty"`
@@ -32,7 +32,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct {
 	Container *string `json:"container,omitempty"`
 }
 
-// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with
+// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with
 // apply.
func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go index fe3d15e86..f03261612 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go index c118e6ca1..8dce4529d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. 
func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go index ab771214e..4034d7e55 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go index e2c24646b..93e37eaff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. 
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithSelfLink(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...str return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *HorizontalPodAutoscalerApplyConfiguration) WithClusterName(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go index de3e6ea5c..8bb82298d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. 
func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go index 761d94a85..6f111ceaf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go index 95ec5be43..391b57725 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. 
func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go index 70beec84e..961e2c5b4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go @@ -22,7 +22,7 @@ import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. type MetricSpecApplyConfiguration struct { Type *v2beta1.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go index b03ea2f9e..587b5a1f8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go @@ -22,7 +22,7 @@ import ( v2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2beta1.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go index 07d467972..a9e2eead4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. 
type ObjectMetricSourceApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricSourceApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go index b5e0d3e3d..4d3be8df6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. type ObjectMetricStatusApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricStatusApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go index a4122b898..cfcd752e2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricSourceApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. 
func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go index d6172011b..f7a7777fd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricStatusApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go index 804f3f492..ad97d83c3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. 
func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go index 5fdc29c13..78fbeaad0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go index aa334744e..1050165ea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. 
func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go index bf0822a06..708f68bc6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go index 2903629bc..c281084b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go index 80053a6b3..d34ca1149 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. 
package v2beta2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go index 71ac35adb..be29e607f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go index 381925b23..ce666f0f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. 
func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *HorizontalPodAutoscalerApplyConfiguration) WithSelfLink(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...str return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *HorizontalPodAutoscalerApplyConfiguration) WithClusterName(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go index ec41bfade..e9b1a9fb9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. 
func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go index 0f0cae75d..a73e7ebaa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { Type *v2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type HorizontalPodAutoscalerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go index c60adee58..9629e4bd5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. 
func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go index 881a874e5..1eee64505 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go index 2a535891a..b799f99e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { Type *v2beta2.HPAScalingPolicyType `json:"type,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingPolicyApplyConfiguration struct { PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. 
func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go index 57c917b89..f7e8d9ae3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` @@ -30,7 +30,7 @@ type HPAScalingRulesApplyConfiguration struct { Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go index 70cbd4e81..e8b2abb0e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go index 1e7ee1419..3ec710861 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
type MetricSpecApplyConfiguration struct { Type *v2beta2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go index 353ec6d94..40d32795b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go @@ -22,7 +22,7 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { Type *v2beta2.MetricSourceType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go index fbf006a5a..aeec3102e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { Type *v2beta2.MetricTargetType `json:"type,omitempty"` @@ -32,7 +32,7 @@ type MetricTargetApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. 
func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go index 5796a0b4c..cc409fc28 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go index eed31dab6..17b492fa0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go index 175e2120d..e87417f2e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. 
type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go index 036588095..6ecbb1807 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go index e6f98be8c..cd1029726 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. 
func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go index cc8118d5e..c482d75f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go index 0ab56be0f..eb13e90b7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go index 749163cc3..8b26816e5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. 
type CronJobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct { Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithSelfLink(value string) *CronJobApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithClusterName(value string) *CronJobApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CronJobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go index eaf3ba8e6..62f9b5298 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go @@ -22,10 +22,11 @@ import ( v1 "k8s.io/api/batch/v1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. 
type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` ConcurrencyPolicy *v1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` Suspend *bool `json:"suspend,omitempty"` @@ -34,7 +35,7 @@ type CronJobSpecApplyConfiguration struct { FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} @@ -48,6 +49,14 @@ func (b *CronJobSpecApplyConfiguration) WithSchedule(value string) *CronJobSpecA return b } +// WithTimeZone sets the TimeZone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TimeZone field is set to the value of the last call. +func (b *CronJobSpecApplyConfiguration) WithTimeZone(value string) *CronJobSpecApplyConfiguration { + b.TimeZone = &value + return b +} + // WithStartingDeadlineSeconds sets the StartingDeadlineSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StartingDeadlineSeconds field is set to the value of the last call. diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go index b7cc2bdfb..095dfe017 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. type CronJobStatusApplyConfiguration struct { Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct { LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go index bb84a58b0..1333e9184 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobApplyConfiguration represents an declarative configuration of the Job type for use +// JobApplyConfiguration represents a declarative configuration of the Job type for use // with apply. 
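Note: the WithTimeZone builder added above is the CronJobSpec change most likely to surface in calling code. A minimal usage sketch, not part of this diff (the clientset, names and field manager are illustrative assumptions, and a real CronJob would also need a jobTemplate):

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
)

// applyCronJob server-side-applies a CronJob whose schedule is evaluated in an
// explicit IANA time zone, using the vendored apply-configuration builders.
func applyCronJob(ctx context.Context, cs kubernetes.Interface) error {
	cj := batchv1ac.CronJob("nightly-report", "default").
		WithSpec(batchv1ac.CronJobSpec().
			WithSchedule("0 3 * * *").
			WithTimeZone("Europe/Berlin")) // new builder in this client-go version

	_, err := cs.BatchV1().CronJobs("default").
		Apply(ctx, cj, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}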
type JobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type JobApplyConfiguration struct { Status *JobStatusApplyConfiguration `json:"status,omitempty"` } -// Job constructs an declarative configuration of the Job type for use with +// Job constructs a declarative configuration of the Job type for use with // apply. func Job(name, namespace string) *JobApplyConfiguration { b := &JobApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *JobApplyConfiguration) WithNamespace(value string) *JobApplyConfigurati return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *JobApplyConfiguration) WithSelfLink(value string) *JobApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *JobApplyConfiguration) WithFinalizers(values ...string) *JobApplyConfig return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *JobApplyConfiguration) WithClusterName(value string) *JobApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *JobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *JobApplyConfiguration) WithStatus(value *JobStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go index 388ca7a1c..4f15bc604 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobConditionApplyConfiguration represents an declarative configuration of the JobCondition type for use +// JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use // with apply. type JobConditionApplyConfiguration struct { Type *v1.JobConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type JobConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// JobConditionApplyConfiguration constructs an declarative configuration of the JobCondition type for use with +// JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with // apply. 
func JobCondition() *JobConditionApplyConfiguration { return &JobConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go index e14244889..2104fe113 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go @@ -21,25 +21,31 @@ package v1 import ( batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobSpecApplyConfiguration represents an declarative configuration of the JobSpec type for use +// JobSpecApplyConfiguration represents a declarative configuration of the JobSpec type for use // with apply. type JobSpecApplyConfiguration struct { Parallelism *int32 `json:"parallelism,omitempty"` Completions *int32 `json:"completions,omitempty"` ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` + PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"` + SuccessPolicy *SuccessPolicyApplyConfiguration `json:"successPolicy,omitempty"` BackoffLimit *int32 `json:"backoffLimit,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"` + MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` ManualSelector *bool `json:"manualSelector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"` CompletionMode *batchv1.CompletionMode `json:"completionMode,omitempty"` Suspend *bool `json:"suspend,omitempty"` + PodReplacementPolicy *batchv1.PodReplacementPolicy `json:"podReplacementPolicy,omitempty"` + ManagedBy *string `json:"managedBy,omitempty"` } -// JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with +// JobSpecApplyConfiguration constructs a declarative configuration of the JobSpec type for use with // apply. func JobSpec() *JobSpecApplyConfiguration { return &JobSpecApplyConfiguration{} @@ -69,6 +75,22 @@ func (b *JobSpecApplyConfiguration) WithActiveDeadlineSeconds(value int64) *JobS return b } +// WithPodFailurePolicy sets the PodFailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodFailurePolicy field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithPodFailurePolicy(value *PodFailurePolicyApplyConfiguration) *JobSpecApplyConfiguration { + b.PodFailurePolicy = value + return b +} + +// WithSuccessPolicy sets the SuccessPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SuccessPolicy field is set to the value of the last call. 
+func (b *JobSpecApplyConfiguration) WithSuccessPolicy(value *SuccessPolicyApplyConfiguration) *JobSpecApplyConfiguration { + b.SuccessPolicy = value + return b +} + // WithBackoffLimit sets the BackoffLimit field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the BackoffLimit field is set to the value of the last call. @@ -77,10 +99,26 @@ func (b *JobSpecApplyConfiguration) WithBackoffLimit(value int32) *JobSpecApplyC return b } +// WithBackoffLimitPerIndex sets the BackoffLimitPerIndex field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BackoffLimitPerIndex field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithBackoffLimitPerIndex(value int32) *JobSpecApplyConfiguration { + b.BackoffLimitPerIndex = &value + return b +} + +// WithMaxFailedIndexes sets the MaxFailedIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxFailedIndexes field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithMaxFailedIndexes(value int32) *JobSpecApplyConfiguration { + b.MaxFailedIndexes = &value + return b +} + // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *JobSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *JobSpecApplyConfiguration { +func (b *JobSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *JobSpecApplyConfiguration { b.Selector = value return b } @@ -124,3 +162,19 @@ func (b *JobSpecApplyConfiguration) WithSuspend(value bool) *JobSpecApplyConfigu b.Suspend = &value return b } + +// WithPodReplacementPolicy sets the PodReplacementPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PodReplacementPolicy field is set to the value of the last call. +func (b *JobSpecApplyConfiguration) WithPodReplacementPolicy(value batchv1.PodReplacementPolicy) *JobSpecApplyConfiguration { + b.PodReplacementPolicy = &value + return b +} + +// WithManagedBy sets the ManagedBy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagedBy field is set to the value of the last call. 
+func (b *JobSpecApplyConfiguration) WithManagedBy(value string) *JobSpecApplyConfiguration { + b.ManagedBy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index a36d5d0ae..071a0153f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobStatusApplyConfiguration represents an declarative configuration of the JobStatus type for use +// JobStatusApplyConfiguration represents a declarative configuration of the JobStatus type for use // with apply. type JobStatusApplyConfiguration struct { Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"` @@ -31,12 +31,14 @@ type JobStatusApplyConfiguration struct { Active *int32 `json:"active,omitempty"` Succeeded *int32 `json:"succeeded,omitempty"` Failed *int32 `json:"failed,omitempty"` + Terminating *int32 `json:"terminating,omitempty"` CompletedIndexes *string `json:"completedIndexes,omitempty"` + FailedIndexes *string `json:"failedIndexes,omitempty"` UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"` Ready *int32 `json:"ready,omitempty"` } -// JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with +// JobStatusApplyConfiguration constructs a declarative configuration of the JobStatus type for use with // apply. func JobStatus() *JobStatusApplyConfiguration { return &JobStatusApplyConfiguration{} @@ -95,6 +97,14 @@ func (b *JobStatusApplyConfiguration) WithFailed(value int32) *JobStatusApplyCon return b } +// WithTerminating sets the Terminating field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Terminating field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithTerminating(value int32) *JobStatusApplyConfiguration { + b.Terminating = &value + return b +} + // WithCompletedIndexes sets the CompletedIndexes field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CompletedIndexes field is set to the value of the last call. @@ -103,6 +113,14 @@ func (b *JobStatusApplyConfiguration) WithCompletedIndexes(value string) *JobSta return b } +// WithFailedIndexes sets the FailedIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedIndexes field is set to the value of the last call. +func (b *JobStatusApplyConfiguration) WithFailedIndexes(value string) *JobStatusApplyConfiguration { + b.FailedIndexes = &value + return b +} + // WithUncountedTerminatedPods sets the UncountedTerminatedPods field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UncountedTerminatedPods field is set to the value of the last call. 
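Taken together, the batch/v1 hunks above drop the long-deprecated WithSelfLink and WithClusterName metadata builders (any caller still chaining them will stop compiling) and add GetName plus several new JobSpec and JobStatus builders. A hedged sketch of the new spec builders, with illustrative names and values that are not taken from this diff:

package main

import (
	batchv1 "k8s.io/api/batch/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

// buildIndexedJob composes a Job apply configuration that exercises the builders
// introduced by this client-go bump: per-index backoff, a success policy, and a
// pod replacement policy. These features assume Indexed completion mode.
func buildIndexedJob() *batchv1ac.JobApplyConfiguration {
	spec := batchv1ac.JobSpec().
		WithCompletions(10).
		WithParallelism(5).
		WithCompletionMode(batchv1.IndexedCompletion).
		WithBackoffLimitPerIndex(2).              // new: retries tracked per index
		WithMaxFailedIndexes(3).                  // new: give up once 3 indexes fail
		WithPodReplacementPolicy(batchv1.Failed). // new: only replace fully failed pods
		WithSuccessPolicy(batchv1ac.SuccessPolicy().
			WithRules(batchv1ac.SuccessPolicyRule().WithSucceededCount(8))) // new: declare success early

	job := batchv1ac.Job("indexed-batch", "default").WithSpec(spec)

	// GetName is also new; it reads the name back without reaching into ObjectMeta.
	_ = job.GetName()
	return job
}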
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go index 46df3722f..901c4228e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -64,15 +64,6 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *JobTemplateSpecApplyConfiguration) WithSelfLink(value string) *JobTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -182,15 +173,6 @@ func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *Jo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *JobTemplateSpecApplyConfiguration) WithClusterName(value string) *JobTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *JobTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -204,3 +186,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *JobSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go new file mode 100644 index 000000000..05a68b3c9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodFailurePolicyApplyConfiguration represents a declarative configuration of the PodFailurePolicy type for use +// with apply. +type PodFailurePolicyApplyConfiguration struct { + Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"` +} + +// PodFailurePolicyApplyConfiguration constructs a declarative configuration of the PodFailurePolicy type for use with +// apply. +func PodFailurePolicy() *PodFailurePolicyApplyConfiguration { + return &PodFailurePolicyApplyConfiguration{} +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. +func (b *PodFailurePolicyApplyConfiguration) WithRules(values ...*PodFailurePolicyRuleApplyConfiguration) *PodFailurePolicyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go new file mode 100644 index 000000000..cd32296ca --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/batch/v1" +) + +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use +// with apply. +type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { + ContainerName *string `json:"containerName,omitempty"` + Operator *v1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"` + Values []int32 `json:"values,omitempty"` +} + +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with +// apply. 
+func PodFailurePolicyOnExitCodesRequirement() *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + return &PodFailurePolicyOnExitCodesRequirementApplyConfiguration{} +} + +// WithContainerName sets the ContainerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerName field is set to the value of the last call. +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithContainerName(value string) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + b.ContainerName = &value + return b +} + +// WithOperator sets the Operator field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Operator field is set to the value of the last call. +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value v1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + b.Operator = &value + return b +} + +// WithValues adds the given value to the Values field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Values field. +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithValues(values ...int32) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { + for i := range values { + b.Values = append(b.Values, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go new file mode 100644 index 000000000..07af4fb0e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use +// with apply. +type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct { + Type *v1.PodConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` +} + +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with +// apply. 
+func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { + return &PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value v1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go new file mode 100644 index 000000000..b004921d3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/batch/v1" +) + +// PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use +// with apply. +type PodFailurePolicyRuleApplyConfiguration struct { + Action *v1.PodFailurePolicyAction `json:"action,omitempty"` + OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"` + OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"` +} + +// PodFailurePolicyRuleApplyConfiguration constructs a declarative configuration of the PodFailurePolicyRule type for use with +// apply. +func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration { + return &PodFailurePolicyRuleApplyConfiguration{} +} + +// WithAction sets the Action field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Action field is set to the value of the last call. +func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value v1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration { + b.Action = &value + return b +} + +// WithOnExitCodes sets the OnExitCodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the OnExitCodes field is set to the value of the last call. +func (b *PodFailurePolicyRuleApplyConfiguration) WithOnExitCodes(value *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) *PodFailurePolicyRuleApplyConfiguration { + b.OnExitCodes = value + return b +} + +// WithOnPodConditions adds the given value to the OnPodConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OnPodConditions field. +func (b *PodFailurePolicyRuleApplyConfiguration) WithOnPodConditions(values ...*PodFailurePolicyOnPodConditionsPatternApplyConfiguration) *PodFailurePolicyRuleApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOnPodConditions") + } + b.OnPodConditions = append(b.OnPodConditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go new file mode 100644 index 000000000..a3f4f39e2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SuccessPolicyApplyConfiguration represents a declarative configuration of the SuccessPolicy type for use +// with apply. +type SuccessPolicyApplyConfiguration struct { + Rules []SuccessPolicyRuleApplyConfiguration `json:"rules,omitempty"` +} + +// SuccessPolicyApplyConfiguration constructs a declarative configuration of the SuccessPolicy type for use with +// apply. +func SuccessPolicy() *SuccessPolicyApplyConfiguration { + return &SuccessPolicyApplyConfiguration{} +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. +func (b *SuccessPolicyApplyConfiguration) WithRules(values ...*SuccessPolicyRuleApplyConfiguration) *SuccessPolicyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go new file mode 100644 index 000000000..2b5e3d91f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SuccessPolicyRuleApplyConfiguration represents a declarative configuration of the SuccessPolicyRule type for use +// with apply. +type SuccessPolicyRuleApplyConfiguration struct { + SucceededIndexes *string `json:"succeededIndexes,omitempty"` + SucceededCount *int32 `json:"succeededCount,omitempty"` +} + +// SuccessPolicyRuleApplyConfiguration constructs a declarative configuration of the SuccessPolicyRule type for use with +// apply. +func SuccessPolicyRule() *SuccessPolicyRuleApplyConfiguration { + return &SuccessPolicyRuleApplyConfiguration{} +} + +// WithSucceededIndexes sets the SucceededIndexes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededIndexes field is set to the value of the last call. +func (b *SuccessPolicyRuleApplyConfiguration) WithSucceededIndexes(value string) *SuccessPolicyRuleApplyConfiguration { + b.SucceededIndexes = &value + return b +} + +// WithSucceededCount sets the SucceededCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SucceededCount field is set to the value of the last call. +func (b *SuccessPolicyRuleApplyConfiguration) WithSucceededCount(value int32) *SuccessPolicyRuleApplyConfiguration { + b.SucceededCount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go index 1409303ff..ff6b57b86 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// UncountedTerminatedPodsApplyConfiguration represents an declarative configuration of the UncountedTerminatedPods type for use +// UncountedTerminatedPodsApplyConfiguration represents a declarative configuration of the UncountedTerminatedPods type for use // with apply. type UncountedTerminatedPodsApplyConfiguration struct { Succeeded []types.UID `json:"succeeded,omitempty"` Failed []types.UID `json:"failed,omitempty"` } -// UncountedTerminatedPodsApplyConfiguration constructs an declarative configuration of the UncountedTerminatedPods type for use with +// UncountedTerminatedPodsApplyConfiguration constructs a declarative configuration of the UncountedTerminatedPods type for use with // apply. 
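The new podfailurepolicy*.go and successpolicy*.go files above only define builders; how they compose is easier to see in use. A sketch with an illustrative container name and exit code (assumptions, not taken from this diff), producing a policy that could be passed to JobSpec's WithPodFailurePolicy:

package main

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

// examplePodFailurePolicy ignores pods that failed because of a disruption and
// fails the whole Job when the main container exits with a non-retriable code.
func examplePodFailurePolicy() *batchv1ac.PodFailurePolicyApplyConfiguration {
	return batchv1ac.PodFailurePolicy().WithRules(
		// Rule 1: disruptions (node drain, preemption) do not count against the backoff limit.
		batchv1ac.PodFailurePolicyRule().
			WithAction(batchv1.PodFailurePolicyActionIgnore).
			WithOnPodConditions(batchv1ac.PodFailurePolicyOnPodConditionsPattern().
				WithType(corev1.DisruptionTarget).
				WithStatus(corev1.ConditionTrue)),
		// Rule 2: exit code 42 from the "main" container is terminal for the Job.
		batchv1ac.PodFailurePolicyRule().
			WithAction(batchv1.PodFailurePolicyActionFailJob).
			WithOnExitCodes(batchv1ac.PodFailurePolicyOnExitCodesRequirement().
				WithContainerName("main").
				WithOperator(batchv1.PodFailurePolicyOnExitCodesOpIn).
				WithValues(42)),
	)
}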
func UncountedTerminatedPods() *UncountedTerminatedPodsApplyConfiguration { return &UncountedTerminatedPodsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go index 8b16bc55c..765ed5e65 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. type CronJobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct { Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithSelfLink(value string) *CronJobApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithClusterName(value string) *CronJobApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CronJobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go index 7ca431b1e..21043690d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go @@ -22,10 +22,11 @@ import ( v1beta1 "k8s.io/api/batch/v1beta1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` ConcurrencyPolicy *v1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` Suspend *bool `json:"suspend,omitempty"` @@ -34,7 +35,7 @@ type CronJobSpecApplyConfiguration struct { FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} @@ -48,6 +49,14 @@ func (b *CronJobSpecApplyConfiguration) WithSchedule(value string) *CronJobSpecA return b } +// WithTimeZone sets the TimeZone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TimeZone field is set to the value of the last call. +func (b *CronJobSpecApplyConfiguration) WithTimeZone(value string) *CronJobSpecApplyConfiguration { + b.TimeZone = &value + return b +} + // WithStartingDeadlineSeconds sets the StartingDeadlineSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StartingDeadlineSeconds field is set to the value of the last call. diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go index 8dca14f66..335f9e0dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. 
type CronJobStatusApplyConfiguration struct { Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct { LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go index bad60e1fb..5fd2485c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go @@ -25,14 +25,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *batchv1.JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -65,15 +65,6 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *JobTemplateSpecApplyConfiguration) WithSelfLink(value string) *JobTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -183,15 +174,6 @@ func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *Jo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *JobTemplateSpecApplyConfiguration) WithClusterName(value string) *JobTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *JobTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -205,3 +187,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *batchv1.JobSpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go index 9d46541b7..e30bb6242 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. type CertificateSigningRequestApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct { Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CertificateSigningRequestApplyConfiguration) WithSelfLink(value string) *CertificateSigningRequestApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...s return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *CertificateSigningRequestApplyConfiguration) WithClusterName(value string) *CertificateSigningRequestApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CertificateSigningRequestApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go index 13d69cfce..7a4bfce01 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { Type *v1.RequestConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct { LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go index 81ca214a9..9c4a85693 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/certificates/v1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. 
type CertificateSigningRequestSpecApplyConfiguration struct { Request []byte `json:"request,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct { Extra map[string]v1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go index 59d593033..897f6d1e9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 000000000..9cd10bc56 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterTrustBundleApplyConfiguration represents a declarative configuration of the ClusterTrustBundle type for use +// with apply. +type ClusterTrustBundleApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ClusterTrustBundle constructs a declarative configuration of the ClusterTrustBundle type for use with +// apply. +func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { + b := &ClusterTrustBundleApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") + return b +} + +// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from +// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a +// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterTrustBundle must be a unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API. +// ExtractClusterTrustBundle provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "") +} + +// ExtractClusterTrustBundleStatus is the same as ExtractClusterTrustBundle except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterTrustBundleStatus(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) { + return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "status") +} + +func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) { + b := &ClusterTrustBundleApplyConfiguration{} + err := managedfields.ExtractInto(clusterTrustBundle, internal.Parser().Type("io.k8s.api.certificates.v1alpha1.ClusterTrustBundle"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterTrustBundle.Name) + + b.WithKind("ClusterTrustBundle") + b.WithAPIVersion("certificates.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Kind field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ClusterTrustBundleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundleSpecApplyConfiguration) *ClusterTrustBundleApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterTrustBundleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go new file mode 100644 index 000000000..7bb36f708 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterTrustBundleSpecApplyConfiguration represents a declarative configuration of the ClusterTrustBundleSpec type for use +// with apply. +type ClusterTrustBundleSpecApplyConfiguration struct { + SignerName *string `json:"signerName,omitempty"` + TrustBundle *string `json:"trustBundle,omitempty"` +} + +// ClusterTrustBundleSpecApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleSpec type for use with +// apply. 
+func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { + return &ClusterTrustBundleSpecApplyConfiguration{} +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.SignerName = &value + return b +} + +// WithTrustBundle sets the TrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrustBundle field is set to the value of the last call. +func (b *ClusterTrustBundleSpecApplyConfiguration) WithTrustBundle(value string) *ClusterTrustBundleSpecApplyConfiguration { + b.TrustBundle = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go index 907b81983..d6e08824a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. type CertificateSigningRequestApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct { Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CertificateSigningRequestApplyConfiguration) WithSelfLink(value string) *CertificateSigningRequestApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
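The new certificates/v1alpha1 apply configurations above follow the usual builder pattern: chained With* calls populate the declarative configuration, and the GetName accessor added in this vendor bump reads the name back without dereferencing ObjectMeta. A minimal usage sketch, assuming a typed clientset and a PEM bundle supplied by the caller; the signer name, bundle name, and field manager below are illustrative placeholders:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	certv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

// applyTrustBundle builds a ClusterTrustBundle apply configuration and submits
// it with server-side apply. Names and the field manager are placeholders.
func applyTrustBundle(ctx context.Context, cs kubernetes.Interface, pemBundle string) error {
	ac := certv1alpha1.ClusterTrustBundle("example.com:ca:my-bundle").
		WithSpec(certv1alpha1.ClusterTrustBundleSpec().
			WithSignerName("example.com/ca").
			WithTrustBundle(pemBundle))

	// GetName (added in this vendor bump) returns the pointer set by the constructor.
	_ = ac.GetName()

	_, err := cs.CertificatesV1alpha1().ClusterTrustBundles().
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}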
@@ -242,15 +233,6 @@ func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...s return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *CertificateSigningRequestApplyConfiguration) WithClusterName(value string) *CertificateSigningRequestApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CertificateSigningRequestApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go index 2c32a3272..6e3692d1c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { Type *v1beta1.RequestConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestConditionApplyConfiguration struct { LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. 
func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go index 9554b1f40..9284eca3a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/certificates/v1beta1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. type CertificateSigningRequestSpecApplyConfiguration struct { Request []byte `json:"request,omitempty"` @@ -35,7 +35,7 @@ type CertificateSigningRequestSpecApplyConfiguration struct { Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go index 9d8c5d458..f82e8aed3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. 
func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go index fcaddb663..ffd84583f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct { Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *LeaseApplyConfiguration) WithSelfLink(value string) *LeaseApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *LeaseApplyConfiguration) WithClusterName(value string) *LeaseApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *LeaseApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go index a5f6a6ebb..01d0df138 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. package v1 import ( + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 000000000..ef7684779 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,255 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
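The coordination/v1 LeaseSpec apply configuration now carries the coordinated-leader-election fields (Strategy, PreferredHolder). A short sketch of how the new setters compose with the existing Lease builder and server-side apply; the lease name, namespace, holder identities, and field manager are illustrative, and the strategy string assumes the value used by Kubernetes 1.31 coordinated leader election:

package main

import (
	"context"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordv1ac "k8s.io/client-go/applyconfigurations/coordination/v1"
	"k8s.io/client-go/kubernetes"
)

// applyLease declares a Lease whose holder prefers to hand over to another
// candidate, using the Strategy and PreferredHolder fields added above.
func applyLease(ctx context.Context, cs kubernetes.Interface) error {
	lease := coordv1ac.Lease("example-leader", "kube-system").
		WithSpec(coordv1ac.LeaseSpec().
			WithHolderIdentity("instance-a").
			WithLeaseDurationSeconds(15).
			WithStrategy(coordinationv1.CoordinatedLeaseStrategy("OldestEmulationVersion")).
			WithPreferredHolder("instance-b"))

	_, err := cs.CoordinationV1().Leases("kube-system").
		Apply(ctx, lease, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}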
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// LeaseCandidateApplyConfiguration represents a declarative configuration of the LeaseCandidate type for use +// with apply. +type LeaseCandidateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// LeaseCandidate constructs a declarative configuration of the LeaseCandidate type for use with +// apply. +func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration { + b := &LeaseCandidateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha1") + return b +} + +// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from +// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a +// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// leaseCandidate must be a unmodified LeaseCandidate API object that was retrieved from the Kubernetes API. +// ExtractLeaseCandidate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "") +} + +// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "status") +} + +func extractLeaseCandidate(leaseCandidate *coordinationv1alpha1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) { + b := &LeaseCandidateApplyConfiguration{} + err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha1.LeaseCandidate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(leaseCandidate.Name) + b.WithNamespace(leaseCandidate.Namespace) + + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidateApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCandidateApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *LeaseCandidateApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *LeaseCandidateApplyConfiguration) WithFinalizers(values ...string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *LeaseCandidateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithSpec(value *LeaseCandidateSpecApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *LeaseCandidateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go new file mode 100644 index 000000000..61d3dca10 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha1/leasecandidatespec.go @@ -0,0 +1,91 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + coordinationv1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use +// with apply. +type LeaseCandidateSpecApplyConfiguration struct { + LeaseName *string `json:"leaseName,omitempty"` + PingTime *v1.MicroTime `json:"pingTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + BinaryVersion *string `json:"binaryVersion,omitempty"` + EmulationVersion *string `json:"emulationVersion,omitempty"` + PreferredStrategies []coordinationv1.CoordinatedLeaseStrategy `json:"preferredStrategies,omitempty"` +} + +// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with +// apply. +func LeaseCandidateSpec() *LeaseCandidateSpecApplyConfiguration { + return &LeaseCandidateSpecApplyConfiguration{} +} + +// WithLeaseName sets the LeaseName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LeaseName field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithLeaseName(value string) *LeaseCandidateSpecApplyConfiguration { + b.LeaseName = &value + return b +} + +// WithPingTime sets the PingTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PingTime field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithPingTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.PingTime = &value + return b +} + +// WithRenewTime sets the RenewTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RenewTime field is set to the value of the last call. 
+func (b *LeaseCandidateSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.RenewTime = &value + return b +} + +// WithBinaryVersion sets the BinaryVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BinaryVersion field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithBinaryVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.BinaryVersion = &value + return b +} + +// WithEmulationVersion sets the EmulationVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EmulationVersion field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithEmulationVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.EmulationVersion = &value + return b +} + +// WithPreferredStrategies adds the given value to the PreferredStrategies field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PreferredStrategies field. +func (b *LeaseCandidateSpecApplyConfiguration) WithPreferredStrategies(values ...coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration { + for i := range values { + b.PreferredStrategies = append(b.PreferredStrategies, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go index f63ddc29e..9aa0703e8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct { Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *LeaseApplyConfiguration) WithSelfLink(value string) *LeaseApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *LeaseApplyConfiguration) WithClusterName(value string) *LeaseApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *LeaseApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go index 865eb7645..8c7fddfc6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. package v1beta1 import ( + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. 
+func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go index df6d1c64e..45484f140 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AffinityApplyConfiguration represents an declarative configuration of the Affinity type for use +// AffinityApplyConfiguration represents a declarative configuration of the Affinity type for use // with apply. type AffinityApplyConfiguration struct { NodeAffinity *NodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` @@ -26,7 +26,7 @@ type AffinityApplyConfiguration struct { PodAntiAffinity *PodAntiAffinityApplyConfiguration `json:"podAntiAffinity,omitempty"` } -// AffinityApplyConfiguration constructs an declarative configuration of the Affinity type for use with +// AffinityApplyConfiguration constructs a declarative configuration of the Affinity type for use with // apply. func Affinity() *AffinityApplyConfiguration { return &AffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go new file mode 100644 index 000000000..1d698fd61 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use +// with apply. +type AppArmorProfileApplyConfiguration struct { + Type *v1.AppArmorProfileType `json:"type,omitempty"` + LocalhostProfile *string `json:"localhostProfile,omitempty"` +} + +// AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with +// apply. +func AppArmorProfile() *AppArmorProfileApplyConfiguration { + return &AppArmorProfileApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Type field is set to the value of the last call. +func (b *AppArmorProfileApplyConfiguration) WithType(value v1.AppArmorProfileType) *AppArmorProfileApplyConfiguration { + b.Type = &value + return b +} + +// WithLocalhostProfile sets the LocalhostProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LocalhostProfile field is set to the value of the last call. +func (b *AppArmorProfileApplyConfiguration) WithLocalhostProfile(value string) *AppArmorProfileApplyConfiguration { + b.LocalhostProfile = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go index 970bf24c4..e4c2fff3f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// AttachedVolumeApplyConfiguration represents an declarative configuration of the AttachedVolume type for use +// AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use // with apply. type AttachedVolumeApplyConfiguration struct { Name *v1.UniqueVolumeName `json:"name,omitempty"` DevicePath *string `json:"devicePath,omitempty"` } -// AttachedVolumeApplyConfiguration constructs an declarative configuration of the AttachedVolume type for use with +// AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with // apply. func AttachedVolume() *AttachedVolumeApplyConfiguration { return &AttachedVolumeApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go index 6ff335e9d..d08786965 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use +// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use // with apply. type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with +// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with // apply. 
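With the new AppArmorProfile apply configuration in place, a caller could build a profile as sketched below; the "RuntimeDefault" value and the alias names are assumptions (the corresponding constant in k8s.io/api/core/v1 is expected to be AppArmorProfileTypeRuntimeDefault):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Build an AppArmorProfile apply configuration with the RuntimeDefault type.
	profile := corev1ac.AppArmorProfile().
		WithType(corev1.AppArmorProfileType("RuntimeDefault"))

	fmt.Println(*profile.Type)
}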
func AWSElasticBlockStoreVolumeSource() *AWSElasticBlockStoreVolumeSourceApplyConfiguration { return &AWSElasticBlockStoreVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go index b2774735a..40ad5ac78 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// AzureDiskVolumeSourceApplyConfiguration represents an declarative configuration of the AzureDiskVolumeSource type for use +// AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use // with apply. type AzureDiskVolumeSourceApplyConfiguration struct { DiskName *string `json:"diskName,omitempty"` @@ -33,7 +33,7 @@ type AzureDiskVolumeSourceApplyConfiguration struct { Kind *v1.AzureDataDiskKind `json:"kind,omitempty"` } -// AzureDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureDiskVolumeSource type for use with +// AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with // apply. func AzureDiskVolumeSource() *AzureDiskVolumeSourceApplyConfiguration { return &AzureDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go index f17393833..70a6b17be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AzureFilePersistentVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFilePersistentVolumeSource type for use +// AzureFilePersistentVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFilePersistentVolumeSource type for use // with apply. type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretNamespace *string `json:"secretNamespace,omitempty"` } -// AzureFilePersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFilePersistentVolumeSource type for use with +// AzureFilePersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFilePersistentVolumeSource type for use with // apply. func AzureFilePersistentVolumeSource() *AzureFilePersistentVolumeSourceApplyConfiguration { return &AzureFilePersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go index a7f7f33d8..ff0c86791 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// AzureFileVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFileVolumeSource type for use +// AzureFileVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFileVolumeSource type for use // with apply. type AzureFileVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -26,7 +26,7 @@ type AzureFileVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AzureFileVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFileVolumeSource type for use with +// AzureFileVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFileVolumeSource type for use with // apply. func AzureFileVolumeSource() *AzureFileVolumeSourceApplyConfiguration { return &AzureFileVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go index c3d176c4d..1c463aef5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// CapabilitiesApplyConfiguration represents an declarative configuration of the Capabilities type for use +// CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use // with apply. type CapabilitiesApplyConfiguration struct { Add []v1.Capability `json:"add,omitempty"` Drop []v1.Capability `json:"drop,omitempty"` } -// CapabilitiesApplyConfiguration constructs an declarative configuration of the Capabilities type for use with +// CapabilitiesApplyConfiguration constructs a declarative configuration of the Capabilities type for use with // apply. func Capabilities() *CapabilitiesApplyConfiguration { return &CapabilitiesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go index a41936fe3..f3ee2d03e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CephFSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSPersistentVolumeSource type for use +// CephFSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSPersistentVolumeSource type for use // with apply. type CephFSPersistentVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSPersistentVolumeSource type for use with +// CephFSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSPersistentVolumeSource type for use with // apply. 
func CephFSPersistentVolumeSource() *CephFSPersistentVolumeSourceApplyConfiguration { return &CephFSPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go index 0ea070ba5..77d53d6eb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CephFSVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSVolumeSource type for use +// CephFSVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSVolumeSource type for use // with apply. type CephFSVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSVolumeSource type for use with +// CephFSVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSVolumeSource type for use with // apply. func CephFSVolumeSource() *CephFSVolumeSourceApplyConfiguration { return &CephFSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go index 7754cf92f..b26573488 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CinderPersistentVolumeSource type for use +// CinderPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CinderPersistentVolumeSource type for use // with apply. type CinderPersistentVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderPersistentVolumeSourceApplyConfiguration struct { SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderPersistentVolumeSource type for use with +// CinderPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderPersistentVolumeSource type for use with // apply. func CinderPersistentVolumeSource() *CinderPersistentVolumeSourceApplyConfiguration { return &CinderPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go index 51271e279..131cbf219 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderVolumeSourceApplyConfiguration represents an declarative configuration of the CinderVolumeSource type for use +// CinderVolumeSourceApplyConfiguration represents a declarative configuration of the CinderVolumeSource type for use // with apply. 
type CinderVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderVolumeSource type for use with +// CinderVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderVolumeSource type for use with // apply. func CinderVolumeSource() *CinderVolumeSourceApplyConfiguration { return &CinderVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go index a666e8faa..02c4e55e1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ClientIPConfigApplyConfiguration represents an declarative configuration of the ClientIPConfig type for use +// ClientIPConfigApplyConfiguration represents a declarative configuration of the ClientIPConfig type for use // with apply. type ClientIPConfigApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` } -// ClientIPConfigApplyConfiguration constructs an declarative configuration of the ClientIPConfig type for use with +// ClientIPConfigApplyConfiguration constructs a declarative configuration of the ClientIPConfig type for use with // apply. func ClientIPConfig() *ClientIPConfigApplyConfiguration { return &ClientIPConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go new file mode 100644 index 000000000..bcfbac63e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go @@ -0,0 +1,79 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use +// with apply. +type ClusterTrustBundleProjectionApplyConfiguration struct { + Name *string `json:"name,omitempty"` + SignerName *string `json:"signerName,omitempty"` + LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + Optional *bool `json:"optional,omitempty"` + Path *string `json:"path,omitempty"` +} + +// ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with +// apply. 
+func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration { + return &ClusterTrustBundleProjectionApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithName(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.Name = &value + return b +} + +// WithSignerName sets the SignerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignerName field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.SignerName = &value + return b +} + +// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelector field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { + b.LabelSelector = value + return b +} + +// WithOptional sets the Optional field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Optional field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithOptional(value bool) *ClusterTrustBundleProjectionApplyConfiguration { + b.Optional = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithPath(value string) *ClusterTrustBundleProjectionApplyConfiguration { + b.Path = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go index 1ef65f5a0..0044c7c0b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ComponentConditionApplyConfiguration represents an declarative configuration of the ComponentCondition type for use +// ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use // with apply. 
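A hedged sketch of how the new ClusterTrustBundleProjection builders might be combined; the signer name, labels, and path are placeholders, and the LabelSelector builder from applyconfigurations/meta/v1 is assumed to follow the usual generated pattern:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	// Project trust anchors from ClusterTrustBundles matching a signer and label.
	ctb := corev1ac.ClusterTrustBundleProjection().
		WithSignerName("example.com/my-signer").
		WithLabelSelector(metav1ac.LabelSelector().
			WithMatchLabels(map[string]string{"trust": "public"})).
		WithOptional(true).
		WithPath("trust-anchors.pem")

	fmt.Println(*ctb.SignerName, *ctb.Path)
}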
type ComponentConditionApplyConfiguration struct { Type *v1.ComponentConditionType `json:"type,omitempty"` @@ -31,7 +31,7 @@ type ComponentConditionApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// ComponentConditionApplyConfiguration constructs an declarative configuration of the ComponentCondition type for use with +// ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with // apply. func ComponentCondition() *ComponentConditionApplyConfiguration { return &ComponentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go index 6983a689b..195bde721 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ComponentStatusApplyConfiguration represents an declarative configuration of the ComponentStatus type for use +// ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use // with apply. type ComponentStatusApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ComponentStatusApplyConfiguration struct { Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"` } -// ComponentStatus constructs an declarative configuration of the ComponentStatus type for use with +// ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with // apply. func ComponentStatus(name string) *ComponentStatusApplyConfiguration { b := &ComponentStatusApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *ComponentStatusApplyConfiguration) WithNamespace(value string) *Compone return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ComponentStatusApplyConfiguration) WithSelfLink(value string) *ComponentStatusApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *ComponentStatusApplyConfiguration) WithFinalizers(values ...string) *Co return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *ComponentStatusApplyConfiguration) WithClusterName(value string) *ComponentStatusApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ComponentStatusApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -268,3 +250,9 @@ func (b *ComponentStatusApplyConfiguration) WithConditions(values ...*ComponentC } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ComponentStatusApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go index 0664c1849..576b7a3d6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ConfigMapApplyConfiguration represents an declarative configuration of the ConfigMap type for use +// ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use // with apply. type ConfigMapApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ConfigMapApplyConfiguration struct { BinaryData map[string][]byte `json:"binaryData,omitempty"` } -// ConfigMap constructs an declarative configuration of the ConfigMap type for use with +// ConfigMap constructs a declarative configuration of the ConfigMap type for use with // apply. func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration { b := &ConfigMapApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *ConfigMapApplyConfiguration) WithNamespace(value string) *ConfigMapAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ConfigMapApplyConfiguration) WithSelfLink(value string) *ConfigMapApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *ConfigMapApplyConfiguration) WithFinalizers(values ...string) *ConfigMa return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *ConfigMapApplyConfiguration) WithClusterName(value string) *ConfigMapApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ConfigMapApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -295,3 +277,9 @@ func (b *ConfigMapApplyConfiguration) WithBinaryData(entries map[string][]byte) } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ConfigMapApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go index 8802fff48..b1fccd700 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ConfigMapEnvSourceApplyConfiguration represents an declarative configuration of the ConfigMapEnvSource type for use +// ConfigMapEnvSourceApplyConfiguration represents a declarative configuration of the ConfigMapEnvSource type for use // with apply. type ConfigMapEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// ConfigMapEnvSourceApplyConfiguration constructs an declarative configuration of the ConfigMapEnvSource type for use with +// ConfigMapEnvSourceApplyConfiguration constructs a declarative configuration of the ConfigMapEnvSource type for use with // apply. func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration { return &ConfigMapEnvSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go index 2a8c800af..26c2a75b5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapKeySelectorApplyConfiguration represents an declarative configuration of the ConfigMapKeySelector type for use +// ConfigMapKeySelectorApplyConfiguration represents a declarative configuration of the ConfigMapKeySelector type for use // with apply. type ConfigMapKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapKeySelectorApplyConfiguration constructs an declarative configuration of the ConfigMapKeySelector type for use with +// ConfigMapKeySelectorApplyConfiguration constructs a declarative configuration of the ConfigMapKeySelector type for use with // apply. 
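Alongside the removed WithSelfLink and WithClusterName builders, ConfigMap (like the other types in this diff) gains a GetName accessor. A small sketch of where that is convenient, assuming the standard generated WithData builder:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	cm := corev1ac.ConfigMap("demo-config", "default").
		WithData(map[string]string{"key": "value"})

	// GetName is new in this diff; it reads back the name set by the constructor.
	if name := cm.GetName(); name != nil {
		fmt.Println("applying ConfigMap", *name)
	}
}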
func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration { return &ConfigMapKeySelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go index da9655a54..135bb7d42 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ConfigMapNodeConfigSourceApplyConfiguration represents an declarative configuration of the ConfigMapNodeConfigSource type for use +// ConfigMapNodeConfigSourceApplyConfiguration represents a declarative configuration of the ConfigMapNodeConfigSource type for use // with apply. type ConfigMapNodeConfigSourceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -32,7 +32,7 @@ type ConfigMapNodeConfigSourceApplyConfiguration struct { KubeletConfigKey *string `json:"kubeletConfigKey,omitempty"` } -// ConfigMapNodeConfigSourceApplyConfiguration constructs an declarative configuration of the ConfigMapNodeConfigSource type for use with +// ConfigMapNodeConfigSourceApplyConfiguration constructs a declarative configuration of the ConfigMapNodeConfigSource type for use with // apply. func ConfigMapNodeConfigSource() *ConfigMapNodeConfigSourceApplyConfiguration { return &ConfigMapNodeConfigSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go index 7297d3a43..308b28f57 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapProjectionApplyConfiguration represents an declarative configuration of the ConfigMapProjection type for use +// ConfigMapProjectionApplyConfiguration represents a declarative configuration of the ConfigMapProjection type for use // with apply. type ConfigMapProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapProjectionApplyConfiguration constructs an declarative configuration of the ConfigMapProjection type for use with +// ConfigMapProjectionApplyConfiguration constructs a declarative configuration of the ConfigMapProjection type for use with // apply. func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration { return &ConfigMapProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go index deaebde31..8e0e8dc0f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapVolumeSourceApplyConfiguration represents an declarative configuration of the ConfigMapVolumeSource type for use +// ConfigMapVolumeSourceApplyConfiguration represents a declarative configuration of the ConfigMapVolumeSource type for use // with apply. 
type ConfigMapVolumeSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -27,7 +27,7 @@ type ConfigMapVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapVolumeSourceApplyConfiguration constructs an declarative configuration of the ConfigMapVolumeSource type for use with +// ConfigMapVolumeSourceApplyConfiguration constructs a declarative configuration of the ConfigMapVolumeSource type for use with // apply. func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration { return &ConfigMapVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index d3b066d9c..eed5f7d02 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -22,34 +22,36 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ContainerApplyConfiguration represents an declarative configuration of the Container type for use +// ContainerApplyConfiguration represents a declarative configuration of the Container type for use // with apply. type ContainerApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"` + VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration 
`json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } -// ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with +// ContainerApplyConfiguration constructs a declarative configuration of the Container type for use with // apply. func Container() *ContainerApplyConfiguration { return &ContainerApplyConfiguration{} @@ -146,6 +148,27 @@ func (b *ContainerApplyConfiguration) WithResources(value *ResourceRequirementsA return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *ContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *ContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *ContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *ContainerApplyConfiguration { + b.RestartPolicy = &value + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go index d5c874a7c..bc9428fd1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerImageApplyConfiguration represents an declarative configuration of the ContainerImage type for use +// ContainerImageApplyConfiguration represents a declarative configuration of the ContainerImage type for use // with apply. type ContainerImageApplyConfiguration struct { Names []string `json:"names,omitempty"` SizeBytes *int64 `json:"sizeBytes,omitempty"` } -// ContainerImageApplyConfiguration constructs an declarative configuration of the ContainerImage type for use with +// ContainerImageApplyConfiguration constructs a declarative configuration of the ContainerImage type for use with // apply. 
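The new Container restart-policy builder maps to the sidecar pattern. A minimal sketch with placeholder name and image, using a string conversion rather than the corev1.ContainerRestartPolicyAlways constant:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// An init container marked restartPolicy: Always behaves as a sidecar.
	sidecar := corev1ac.Container().
		WithName("log-shipper").
		WithImage("example.com/log-shipper:latest").
		WithRestartPolicy(corev1.ContainerRestartPolicy("Always"))

	fmt.Println(*sidecar.Name, *sidecar.RestartPolicy)
}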
func ContainerImage() *ContainerImageApplyConfiguration { return &ContainerImageApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go index a23ad9268..7acc0638f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerPortApplyConfiguration represents an declarative configuration of the ContainerPort type for use +// ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use // with apply. type ContainerPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerPortApplyConfiguration struct { HostIP *string `json:"hostIP,omitempty"` } -// ContainerPortApplyConfiguration constructs an declarative configuration of the ContainerPort type for use with +// ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with // apply. func ContainerPort() *ContainerPortApplyConfiguration { return &ContainerPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go new file mode 100644 index 000000000..ea60e3d98 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use +// with apply. +type ContainerResizePolicyApplyConfiguration struct { + ResourceName *v1.ResourceName `json:"resourceName,omitempty"` + RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` +} + +// ContainerResizePolicyApplyConfiguration constructs a declarative configuration of the ContainerResizePolicy type for use with +// apply. +func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { + return &ContainerResizePolicyApplyConfiguration{} +} + +// WithResourceName sets the ResourceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceName field is set to the value of the last call. 
+func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration { + b.ResourceName = &value + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { + b.RestartPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go index 6cbfc7fd9..b958e0177 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ContainerStateApplyConfiguration represents an declarative configuration of the ContainerState type for use +// ContainerStateApplyConfiguration represents a declarative configuration of the ContainerState type for use // with apply. type ContainerStateApplyConfiguration struct { Waiting *ContainerStateWaitingApplyConfiguration `json:"waiting,omitempty"` @@ -26,7 +26,7 @@ type ContainerStateApplyConfiguration struct { Terminated *ContainerStateTerminatedApplyConfiguration `json:"terminated,omitempty"` } -// ContainerStateApplyConfiguration constructs an declarative configuration of the ContainerState type for use with +// ContainerStateApplyConfiguration constructs a declarative configuration of the ContainerState type for use with // apply. func ContainerState() *ContainerStateApplyConfiguration { return &ContainerStateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go index 6c1d7311e..6eec9f7f2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateRunningApplyConfiguration represents an declarative configuration of the ContainerStateRunning type for use +// ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use // with apply. type ContainerStateRunningApplyConfiguration struct { StartedAt *v1.Time `json:"startedAt,omitempty"` } -// ContainerStateRunningApplyConfiguration constructs an declarative configuration of the ContainerStateRunning type for use with +// ContainerStateRunningApplyConfiguration constructs a declarative configuration of the ContainerStateRunning type for use with // apply. 
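The ContainerResizePolicy builders above pair with the Container.WithResizePolicy method shown earlier. A sketch, assuming the usual corev1.ResourceCPU/ResourceMemory constants and the "NotRequired"/"RestartContainer" restart values:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Allow in-place CPU resizes while memory changes restart the container.
	c := corev1ac.Container().
		WithName("app").
		WithResizePolicy(
			corev1ac.ContainerResizePolicy().
				WithResourceName(corev1.ResourceCPU).
				WithRestartPolicy(corev1.ResourceResizeRestartPolicy("NotRequired")),
			corev1ac.ContainerResizePolicy().
				WithResourceName(corev1.ResourceMemory).
				WithRestartPolicy(corev1.ResourceResizeRestartPolicy("RestartContainer")),
		)

	fmt.Println(len(c.ResizePolicy))
}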
func ContainerStateRunning() *ContainerStateRunningApplyConfiguration { return &ContainerStateRunningApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go index 0383c9dd9..b067aa211 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateTerminatedApplyConfiguration represents an declarative configuration of the ContainerStateTerminated type for use +// ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use // with apply. type ContainerStateTerminatedApplyConfiguration struct { ExitCode *int32 `json:"exitCode,omitempty"` @@ -34,7 +34,7 @@ type ContainerStateTerminatedApplyConfiguration struct { ContainerID *string `json:"containerID,omitempty"` } -// ContainerStateTerminatedApplyConfiguration constructs an declarative configuration of the ContainerStateTerminated type for use with +// ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with // apply. func ContainerStateTerminated() *ContainerStateTerminatedApplyConfiguration { return &ContainerStateTerminatedApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go index e51b778c0..7756c7da0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerStateWaitingApplyConfiguration represents an declarative configuration of the ContainerStateWaiting type for use +// ContainerStateWaitingApplyConfiguration represents a declarative configuration of the ContainerStateWaiting type for use // with apply. type ContainerStateWaitingApplyConfiguration struct { Reason *string `json:"reason,omitempty"` Message *string `json:"message,omitempty"` } -// ContainerStateWaitingApplyConfiguration constructs an declarative configuration of the ContainerStateWaiting type for use with +// ContainerStateWaitingApplyConfiguration constructs a declarative configuration of the ContainerStateWaiting type for use with // apply. func ContainerStateWaiting() *ContainerStateWaitingApplyConfiguration { return &ContainerStateWaitingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index 18d2925c1..6a28939c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -18,21 +18,30 @@ limitations under the License. package v1 -// ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use +import ( + corev1 "k8s.io/api/core/v1" +) + +// ContainerStatusApplyConfiguration represents a declarative configuration of the ContainerStatus type for use // with apply. 
type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` -} - -// ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"` + User *ContainerUserApplyConfiguration `json:"user,omitempty"` + AllocatedResourcesStatus []ResourceStatusApplyConfiguration `json:"allocatedResourcesStatus,omitempty"` +} + +// ContainerStatusApplyConfiguration constructs a declarative configuration of the ContainerStatus type for use with // apply. func ContainerStatus() *ContainerStatusApplyConfiguration { return &ContainerStatusApplyConfiguration{} @@ -109,3 +118,53 @@ func (b *ContainerStatusApplyConfiguration) WithStarted(value bool) *ContainerSt b.Started = &value return b } + +// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocatedResources field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *ContainerStatusApplyConfiguration { + b.AllocatedResources = &value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *ContainerStatusApplyConfiguration { + b.Resources = value + return b +} + +// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VolumeMounts field. 
+func (b *ContainerStatusApplyConfiguration) WithVolumeMounts(values ...*VolumeMountStatusApplyConfiguration) *ContainerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVolumeMounts") + } + b.VolumeMounts = append(b.VolumeMounts, *values[i]) + } + return b +} + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithUser(value *ContainerUserApplyConfiguration) *ContainerStatusApplyConfiguration { + b.User = value + return b +} + +// WithAllocatedResourcesStatus adds the given value to the AllocatedResourcesStatus field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllocatedResourcesStatus field. +func (b *ContainerStatusApplyConfiguration) WithAllocatedResourcesStatus(values ...*ResourceStatusApplyConfiguration) *ContainerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllocatedResourcesStatus") + } + b.AllocatedResourcesStatus = append(b.AllocatedResourcesStatus, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go new file mode 100644 index 000000000..34ec8e414 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ContainerUserApplyConfiguration represents a declarative configuration of the ContainerUser type for use +// with apply. +type ContainerUserApplyConfiguration struct { + Linux *LinuxContainerUserApplyConfiguration `json:"linux,omitempty"` +} + +// ContainerUserApplyConfiguration constructs a declarative configuration of the ContainerUser type for use with +// apply. +func ContainerUser() *ContainerUserApplyConfiguration { + return &ContainerUserApplyConfiguration{} +} + +// WithLinux sets the Linux field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Linux field is set to the value of the last call. 
+func (b *ContainerUserApplyConfiguration) WithLinux(value *LinuxContainerUserApplyConfiguration) *ContainerUserApplyConfiguration { + b.Linux = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go index b8165445d..a614d1080 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CSIPersistentVolumeSource type for use +// CSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CSIPersistentVolumeSource type for use // with apply. type CSIPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -30,9 +30,10 @@ type CSIPersistentVolumeSourceApplyConfiguration struct { NodeStageSecretRef *SecretReferenceApplyConfiguration `json:"nodeStageSecretRef,omitempty"` NodePublishSecretRef *SecretReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"` ControllerExpandSecretRef *SecretReferenceApplyConfiguration `json:"controllerExpandSecretRef,omitempty"` + NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"` } -// CSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIPersistentVolumeSource type for use with +// CSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIPersistentVolumeSource type for use with // apply. func CSIPersistentVolumeSource() *CSIPersistentVolumeSourceApplyConfiguration { return &CSIPersistentVolumeSourceApplyConfiguration{} @@ -115,3 +116,11 @@ func (b *CSIPersistentVolumeSourceApplyConfiguration) WithControllerExpandSecret b.ControllerExpandSecretRef = value return b } + +// WithNodeExpandSecretRef sets the NodeExpandSecretRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeExpandSecretRef field is set to the value of the last call. +func (b *CSIPersistentVolumeSourceApplyConfiguration) WithNodeExpandSecretRef(value *SecretReferenceApplyConfiguration) *CSIPersistentVolumeSourceApplyConfiguration { + b.NodeExpandSecretRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go index c2a32df8d..b58d9bbb4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIVolumeSourceApplyConfiguration represents an declarative configuration of the CSIVolumeSource type for use +// CSIVolumeSourceApplyConfiguration represents a declarative configuration of the CSIVolumeSource type for use // with apply. 
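The new NodeExpandSecretRef field on CSIPersistentVolumeSource takes a SecretReference apply configuration. A sketch with placeholder driver and secret names, assuming the standard SecretReference builders:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Reference the secret the CSI driver needs for node-side volume expansion.
	src := corev1ac.CSIPersistentVolumeSource().
		WithDriver("csi.example.com").
		WithVolumeHandle("vol-0123").
		WithNodeExpandSecretRef(corev1ac.SecretReference().
			WithName("expand-secret").
			WithNamespace("storage"))

	fmt.Println(*src.Driver, *src.NodeExpandSecretRef.Name)
}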
type CSIVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type CSIVolumeSourceApplyConfiguration struct { NodePublishSecretRef *LocalObjectReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"` } -// CSIVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIVolumeSource type for use with +// CSIVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIVolumeSource type for use with // apply. func CSIVolumeSource() *CSIVolumeSourceApplyConfiguration { return &CSIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go index 13a2e948f..5be27ec0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DaemonEndpointApplyConfiguration represents an declarative configuration of the DaemonEndpoint type for use +// DaemonEndpointApplyConfiguration represents a declarative configuration of the DaemonEndpoint type for use // with apply. type DaemonEndpointApplyConfiguration struct { Port *int32 `json:"Port,omitempty"` } -// DaemonEndpointApplyConfiguration constructs an declarative configuration of the DaemonEndpoint type for use with +// DaemonEndpointApplyConfiguration constructs a declarative configuration of the DaemonEndpoint type for use with // apply. func DaemonEndpoint() *DaemonEndpointApplyConfiguration { return &DaemonEndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go index f88a87c0b..ed6b8b1bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DownwardAPIProjectionApplyConfiguration represents an declarative configuration of the DownwardAPIProjection type for use +// DownwardAPIProjectionApplyConfiguration represents a declarative configuration of the DownwardAPIProjection type for use // with apply. type DownwardAPIProjectionApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` } -// DownwardAPIProjectionApplyConfiguration constructs an declarative configuration of the DownwardAPIProjection type for use with +// DownwardAPIProjectionApplyConfiguration constructs a declarative configuration of the DownwardAPIProjection type for use with // apply. func DownwardAPIProjection() *DownwardAPIProjectionApplyConfiguration { return &DownwardAPIProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go index b25ff25fa..ec9d013dd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// DownwardAPIVolumeFileApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeFile type for use +// DownwardAPIVolumeFileApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeFile type for use // with apply. type DownwardAPIVolumeFileApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -27,7 +27,7 @@ type DownwardAPIVolumeFileApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// DownwardAPIVolumeFileApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeFile type for use with +// DownwardAPIVolumeFileApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeFile type for use with // apply. func DownwardAPIVolumeFile() *DownwardAPIVolumeFileApplyConfiguration { return &DownwardAPIVolumeFileApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go index 6913bb521..eef9d7ef8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// DownwardAPIVolumeSourceApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeSource type for use +// DownwardAPIVolumeSourceApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeSource type for use // with apply. type DownwardAPIVolumeSourceApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// DownwardAPIVolumeSourceApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeSource type for use with +// DownwardAPIVolumeSourceApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeSource type for use with // apply. func DownwardAPIVolumeSource() *DownwardAPIVolumeSourceApplyConfiguration { return &DownwardAPIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go index 021280daf..a619fdb07 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go @@ -23,14 +23,14 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// EmptyDirVolumeSourceApplyConfiguration represents an declarative configuration of the EmptyDirVolumeSource type for use +// EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use // with apply. type EmptyDirVolumeSourceApplyConfiguration struct { Medium *v1.StorageMedium `json:"medium,omitempty"` SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` } -// EmptyDirVolumeSourceApplyConfiguration constructs an declarative configuration of the EmptyDirVolumeSource type for use with +// EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with // apply. 
func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration { return &EmptyDirVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go index 52a54b600..536e697a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointAddressApplyConfiguration represents an declarative configuration of the EndpointAddress type for use +// EndpointAddressApplyConfiguration represents a declarative configuration of the EndpointAddress type for use // with apply. type EndpointAddressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -27,7 +27,7 @@ type EndpointAddressApplyConfiguration struct { TargetRef *ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"` } -// EndpointAddressApplyConfiguration constructs an declarative configuration of the EndpointAddress type for use with +// EndpointAddressApplyConfiguration constructs a declarative configuration of the EndpointAddress type for use with // apply. func EndpointAddress() *EndpointAddressApplyConfiguration { return &EndpointAddressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go index cc00d0e49..d0d96230c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go index b3b302fe2..98dc69aaa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointsApplyConfiguration represents an declarative configuration of the Endpoints type for use +// EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use // with apply. type EndpointsApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EndpointsApplyConfiguration struct { Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"` } -// Endpoints constructs an declarative configuration of the Endpoints type for use with +// Endpoints constructs a declarative configuration of the Endpoints type for use with // apply. 
func Endpoints(name, namespace string) *EndpointsApplyConfiguration { b := &EndpointsApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *EndpointsApplyConfiguration) WithNamespace(value string) *EndpointsAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EndpointsApplyConfiguration) WithSelfLink(value string) *EndpointsApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *EndpointsApplyConfiguration) WithFinalizers(values ...string) *Endpoint return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *EndpointsApplyConfiguration) WithClusterName(value string) *EndpointsApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EndpointsApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -270,3 +252,9 @@ func (b *EndpointsApplyConfiguration) WithSubsets(values ...*EndpointSubsetApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointsApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go index cd0657a80..33cd8496a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointSubsetApplyConfiguration represents an declarative configuration of the EndpointSubset type for use +// EndpointSubsetApplyConfiguration represents a declarative configuration of the EndpointSubset type for use // with apply. type EndpointSubsetApplyConfiguration struct { Addresses []EndpointAddressApplyConfiguration `json:"addresses,omitempty"` @@ -26,7 +26,7 @@ type EndpointSubsetApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSubsetApplyConfiguration constructs an declarative configuration of the EndpointSubset type for use with +// EndpointSubsetApplyConfiguration constructs a declarative configuration of the EndpointSubset type for use with // apply. 
func EndpointSubset() *EndpointSubsetApplyConfiguration { return &EndpointSubsetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go index 9e46d25de..7aa181cf1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvFromSourceApplyConfiguration represents an declarative configuration of the EnvFromSource type for use +// EnvFromSourceApplyConfiguration represents a declarative configuration of the EnvFromSource type for use // with apply. type EnvFromSourceApplyConfiguration struct { Prefix *string `json:"prefix,omitempty"` @@ -26,7 +26,7 @@ type EnvFromSourceApplyConfiguration struct { SecretRef *SecretEnvSourceApplyConfiguration `json:"secretRef,omitempty"` } -// EnvFromSourceApplyConfiguration constructs an declarative configuration of the EnvFromSource type for use with +// EnvFromSourceApplyConfiguration constructs a declarative configuration of the EnvFromSource type for use with // apply. func EnvFromSource() *EnvFromSourceApplyConfiguration { return &EnvFromSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go index a83528a28..5894166ca 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarApplyConfiguration represents an declarative configuration of the EnvVar type for use +// EnvVarApplyConfiguration represents a declarative configuration of the EnvVar type for use // with apply. type EnvVarApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -26,7 +26,7 @@ type EnvVarApplyConfiguration struct { ValueFrom *EnvVarSourceApplyConfiguration `json:"valueFrom,omitempty"` } -// EnvVarApplyConfiguration constructs an declarative configuration of the EnvVar type for use with +// EnvVarApplyConfiguration constructs a declarative configuration of the EnvVar type for use with // apply. func EnvVar() *EnvVarApplyConfiguration { return &EnvVarApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go index 70c695bd5..a3a55ea7a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarSourceApplyConfiguration represents an declarative configuration of the EnvVarSource type for use +// EnvVarSourceApplyConfiguration represents a declarative configuration of the EnvVarSource type for use // with apply. type EnvVarSourceApplyConfiguration struct { FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"` @@ -27,7 +27,7 @@ type EnvVarSourceApplyConfiguration struct { SecretKeyRef *SecretKeySelectorApplyConfiguration `json:"secretKeyRef,omitempty"` } -// EnvVarSourceApplyConfiguration constructs an declarative configuration of the EnvVarSource type for use with +// EnvVarSourceApplyConfiguration constructs a declarative configuration of the EnvVarSource type for use with // apply. 
func EnvVarSource() *EnvVarSourceApplyConfiguration { return &EnvVarSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 6c24cd419..a15ac6ec3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerApplyConfiguration represents an declarative configuration of the EphemeralContainer type for use +// EphemeralContainerApplyConfiguration represents a declarative configuration of the EphemeralContainer type for use // with apply. type EphemeralContainerApplyConfiguration struct { EphemeralContainerCommonApplyConfiguration `json:",inline"` TargetContainerName *string `json:"targetContainerName,omitempty"` } -// EphemeralContainerApplyConfiguration constructs an declarative configuration of the EphemeralContainer type for use with +// EphemeralContainerApplyConfiguration constructs a declarative configuration of the EphemeralContainer type for use with // apply. func EphemeralContainer() *EphemeralContainerApplyConfiguration { return &EphemeralContainerApplyConfiguration{} @@ -126,6 +126,27 @@ func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequ return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *EphemeralContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerApplyConfiguration { + b.RestartPolicy = &value + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. 
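Illustrative sketch only, not part of the vendored files: the generated helpers above all follow the same chained "With" builder pattern used for server-side apply, and this bump adds new builders such as WithRestartPolicy and the GetName accessors. The package alias and literal values below are assumptions for demonstration.

// sketch.go — minimal, assumed usage of the newly vendored builders.
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Scalar setters keep only the value of the last call; slice setters append.
	// WithRestartPolicy is one of the builders added by this client-go upgrade.
	ec := corev1ac.EphemeralContainer().
		WithRestartPolicy(corev1.ContainerRestartPolicyAlways)

	// Named constructors take name/namespace, and the new GetName accessor
	// reads the name back from the embedded ObjectMeta apply configuration.
	ep := corev1ac.Endpoints("demo", "default")
	if n := ep.GetName(); n != nil {
		fmt.Println("endpoints name:", *n)
	}

	// All apply configurations carry json tags, so they marshal directly.
	out, _ := json.Marshal(ec)
	fmt.Println(string(out))
}
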
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 67e658cfa..d5d13d27a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -22,34 +22,36 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use +// EphemeralContainerCommonApplyConfiguration represents a declarative configuration of the EphemeralContainerCommon type for use // with apply. type EphemeralContainerCommonApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Image *string `json:"image,omitempty"` - Command []string `json:"command,omitempty"` - Args []string `json:"args,omitempty"` - WorkingDir *string `json:"workingDir,omitempty"` - Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` - EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` - Env []EnvVarApplyConfiguration `json:"env,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` - VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` - LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` - ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` - StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` - Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` - TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` - TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` - ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` - SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` - Stdin *bool `json:"stdin,omitempty"` - StdinOnce *bool `json:"stdinOnce,omitempty"` - TTY *bool `json:"tty,omitempty"` + Name *string `json:"name,omitempty"` + Image *string `json:"image,omitempty"` + Command []string `json:"command,omitempty"` + Args []string `json:"args,omitempty"` + WorkingDir *string `json:"workingDir,omitempty"` + Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"` + EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"` + Env []EnvVarApplyConfiguration `json:"env,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"` + RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"` + VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"` + VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"` + LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"` + ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"` + StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"` + Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"` + TerminationMessagePath *string `json:"terminationMessagePath,omitempty"` + TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"` + ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + 
SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"` + Stdin *bool `json:"stdin,omitempty"` + StdinOnce *bool `json:"stdinOnce,omitempty"` + TTY *bool `json:"tty,omitempty"` } -// EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with +// EphemeralContainerCommonApplyConfiguration constructs a declarative configuration of the EphemeralContainerCommon type for use with // apply. func EphemeralContainerCommon() *EphemeralContainerCommonApplyConfiguration { return &EphemeralContainerCommonApplyConfiguration{} @@ -146,6 +148,27 @@ func (b *EphemeralContainerCommonApplyConfiguration) WithResources(value *Resour return b } +// WithResizePolicy adds the given value to the ResizePolicy field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResizePolicy field. +func (b *EphemeralContainerCommonApplyConfiguration) WithResizePolicy(values ...*ContainerResizePolicyApplyConfiguration) *EphemeralContainerCommonApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResizePolicy") + } + b.ResizePolicy = append(b.ResizePolicy, *values[i]) + } + return b +} + +// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RestartPolicy field is set to the value of the last call. +func (b *EphemeralContainerCommonApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerCommonApplyConfiguration { + b.RestartPolicy = &value + return b +} + // WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeMounts field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go index 31859404c..d2c8c6722 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EphemeralVolumeSourceApplyConfiguration represents an declarative configuration of the EphemeralVolumeSource type for use +// EphemeralVolumeSourceApplyConfiguration represents a declarative configuration of the EphemeralVolumeSource type for use // with apply. type EphemeralVolumeSourceApplyConfiguration struct { VolumeClaimTemplate *PersistentVolumeClaimTemplateApplyConfiguration `json:"volumeClaimTemplate,omitempty"` } -// EphemeralVolumeSourceApplyConfiguration constructs an declarative configuration of the EphemeralVolumeSource type for use with +// EphemeralVolumeSourceApplyConfiguration constructs a declarative configuration of the EphemeralVolumeSource type for use with // apply. 
func EphemeralVolumeSource() *EphemeralVolumeSourceApplyConfiguration { return &EphemeralVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go index 3a0c53694..65d6577ab 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -48,7 +48,7 @@ type EventApplyConfiguration struct { ReportingInstance *string `json:"reportingInstance,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -138,15 +138,6 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EventApplyConfiguration) WithSelfLink(value string) *EventApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -256,15 +247,6 @@ func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *EventApplyConfiguration) WithClusterName(value string) *EventApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -382,3 +364,9 @@ func (b *EventApplyConfiguration) WithReportingInstance(value string) *EventAppl b.ReportingInstance = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go index e66fb4127..18069c0d1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go index 2eb4aa8e4..97edb0493 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// EventSourceApplyConfiguration represents an declarative configuration of the EventSource type for use +// EventSourceApplyConfiguration represents a declarative configuration of the EventSource type for use // with apply. type EventSourceApplyConfiguration struct { Component *string `json:"component,omitempty"` Host *string `json:"host,omitempty"` } -// EventSourceApplyConfiguration constructs an declarative configuration of the EventSource type for use with +// EventSourceApplyConfiguration constructs a declarative configuration of the EventSource type for use with // apply. func EventSource() *EventSourceApplyConfiguration { return &EventSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go index 1df52144d..b7208a91c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ExecActionApplyConfiguration represents an declarative configuration of the ExecAction type for use +// ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use // with apply. type ExecActionApplyConfiguration struct { Command []string `json:"command,omitempty"` } -// ExecActionApplyConfiguration constructs an declarative configuration of the ExecAction type for use with +// ExecActionApplyConfiguration constructs a declarative configuration of the ExecAction type for use with // apply. 
func ExecAction() *ExecActionApplyConfiguration { return &ExecActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go index 43069de9a..000ff2cc6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FCVolumeSourceApplyConfiguration represents an declarative configuration of the FCVolumeSource type for use +// FCVolumeSourceApplyConfiguration represents a declarative configuration of the FCVolumeSource type for use // with apply. type FCVolumeSourceApplyConfiguration struct { TargetWWNs []string `json:"targetWWNs,omitempty"` @@ -28,7 +28,7 @@ type FCVolumeSourceApplyConfiguration struct { WWIDs []string `json:"wwids,omitempty"` } -// FCVolumeSourceApplyConfiguration constructs an declarative configuration of the FCVolumeSource type for use with +// FCVolumeSourceApplyConfiguration constructs a declarative configuration of the FCVolumeSource type for use with // apply. func FCVolumeSource() *FCVolumeSourceApplyConfiguration { return &FCVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go index 47e7c746e..355c2c82d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlexPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the FlexPersistentVolumeSource type for use +// FlexPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the FlexPersistentVolumeSource type for use // with apply. type FlexPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexPersistentVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexPersistentVolumeSource type for use with +// FlexPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexPersistentVolumeSource type for use with // apply. func FlexPersistentVolumeSource() *FlexPersistentVolumeSourceApplyConfiguration { return &FlexPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go index 7c09516a9..08ae9e1be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlexVolumeSourceApplyConfiguration represents an declarative configuration of the FlexVolumeSource type for use +// FlexVolumeSourceApplyConfiguration represents a declarative configuration of the FlexVolumeSource type for use // with apply. 
type FlexVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexVolumeSource type for use with +// FlexVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexVolumeSource type for use with // apply. func FlexVolumeSource() *FlexVolumeSourceApplyConfiguration { return &FlexVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go index 74896d55a..e4ecbba0e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// FlockerVolumeSourceApplyConfiguration represents an declarative configuration of the FlockerVolumeSource type for use +// FlockerVolumeSourceApplyConfiguration represents a declarative configuration of the FlockerVolumeSource type for use // with apply. type FlockerVolumeSourceApplyConfiguration struct { DatasetName *string `json:"datasetName,omitempty"` DatasetUUID *string `json:"datasetUUID,omitempty"` } -// FlockerVolumeSourceApplyConfiguration constructs an declarative configuration of the FlockerVolumeSource type for use with +// FlockerVolumeSourceApplyConfiguration constructs a declarative configuration of the FlockerVolumeSource type for use with // apply. func FlockerVolumeSource() *FlockerVolumeSourceApplyConfiguration { return &FlockerVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go index 0869d3eaa..56c4d03fa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GCEPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the GCEPersistentDiskVolumeSource type for use +// GCEPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the GCEPersistentDiskVolumeSource type for use // with apply. type GCEPersistentDiskVolumeSourceApplyConfiguration struct { PDName *string `json:"pdName,omitempty"` @@ -27,7 +27,7 @@ type GCEPersistentDiskVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GCEPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the GCEPersistentDiskVolumeSource type for use with +// GCEPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the GCEPersistentDiskVolumeSource type for use with // apply. 
func GCEPersistentDiskVolumeSource() *GCEPersistentDiskVolumeSourceApplyConfiguration { return &GCEPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go index 825e02e4e..4ed92317c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GitRepoVolumeSourceApplyConfiguration represents an declarative configuration of the GitRepoVolumeSource type for use +// GitRepoVolumeSourceApplyConfiguration represents a declarative configuration of the GitRepoVolumeSource type for use // with apply. type GitRepoVolumeSourceApplyConfiguration struct { Repository *string `json:"repository,omitempty"` @@ -26,7 +26,7 @@ type GitRepoVolumeSourceApplyConfiguration struct { Directory *string `json:"directory,omitempty"` } -// GitRepoVolumeSourceApplyConfiguration constructs an declarative configuration of the GitRepoVolumeSource type for use with +// GitRepoVolumeSourceApplyConfiguration constructs a declarative configuration of the GitRepoVolumeSource type for use with // apply. func GitRepoVolumeSource() *GitRepoVolumeSourceApplyConfiguration { return &GitRepoVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go index 21a3925e5..c9a23ca5d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GlusterfsPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsPersistentVolumeSource type for use +// GlusterfsPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsPersistentVolumeSource type for use // with apply. type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -27,7 +27,7 @@ type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsNamespace *string `json:"endpointsNamespace,omitempty"` } -// GlusterfsPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsPersistentVolumeSource type for use with +// GlusterfsPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsPersistentVolumeSource type for use with // apply. func GlusterfsPersistentVolumeSource() *GlusterfsPersistentVolumeSourceApplyConfiguration { return &GlusterfsPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go index 7ce6f0b39..8c27f8c70 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// GlusterfsVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsVolumeSource type for use +// GlusterfsVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsVolumeSource type for use // with apply. type GlusterfsVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -26,7 +26,7 @@ type GlusterfsVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GlusterfsVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsVolumeSource type for use with +// GlusterfsVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsVolumeSource type for use with // apply. func GlusterfsVolumeSource() *GlusterfsVolumeSourceApplyConfiguration { return &GlusterfsVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go index f94e55937..0f3a88671 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// GRPCActionApplyConfiguration represents an declarative configuration of the GRPCAction type for use +// GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use // with apply. type GRPCActionApplyConfiguration struct { Port *int32 `json:"port,omitempty"` Service *string `json:"service,omitempty"` } -// GRPCActionApplyConfiguration constructs an declarative configuration of the GRPCAction type for use with +// GRPCActionApplyConfiguration constructs a declarative configuration of the GRPCAction type for use with // apply. func GRPCAction() *GRPCActionApplyConfiguration { return &GRPCActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go index 861508ef5..ec9ea1741 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HostAliasApplyConfiguration represents an declarative configuration of the HostAlias type for use +// HostAliasApplyConfiguration represents a declarative configuration of the HostAlias type for use // with apply. type HostAliasApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostnames []string `json:"hostnames,omitempty"` } -// HostAliasApplyConfiguration constructs an declarative configuration of the HostAlias type for use with +// HostAliasApplyConfiguration constructs a declarative configuration of the HostAlias type for use with // apply. func HostAlias() *HostAliasApplyConfiguration { return &HostAliasApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go new file mode 100644 index 000000000..439b5ce2d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// HostIPApplyConfiguration represents a declarative configuration of the HostIP type for use +// with apply. +type HostIPApplyConfiguration struct { + IP *string `json:"ip,omitempty"` +} + +// HostIPApplyConfiguration constructs a declarative configuration of the HostIP type for use with +// apply. +func HostIP() *HostIPApplyConfiguration { + return &HostIPApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *HostIPApplyConfiguration) WithIP(value string) *HostIPApplyConfiguration { + b.IP = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go index 8b15689ee..10dfedfde 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// HostPathVolumeSourceApplyConfiguration represents an declarative configuration of the HostPathVolumeSource type for use +// HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use // with apply. type HostPathVolumeSourceApplyConfiguration struct { Path *string `json:"path,omitempty"` Type *v1.HostPathType `json:"type,omitempty"` } -// HostPathVolumeSourceApplyConfiguration constructs an declarative configuration of the HostPathVolumeSource type for use with +// HostPathVolumeSourceApplyConfiguration constructs a declarative configuration of the HostPathVolumeSource type for use with // apply. func HostPathVolumeSource() *HostPathVolumeSourceApplyConfiguration { return &HostPathVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go index e4ecdd430..5ecbc27fe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// HTTPGetActionApplyConfiguration represents an declarative configuration of the HTTPGetAction type for use +// HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use // with apply. 
type HTTPGetActionApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -33,7 +33,7 @@ type HTTPGetActionApplyConfiguration struct { HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"` } -// HTTPGetActionApplyConfiguration constructs an declarative configuration of the HTTPGetAction type for use with +// HTTPGetActionApplyConfiguration constructs a declarative configuration of the HTTPGetAction type for use with // apply. func HTTPGetAction() *HTTPGetActionApplyConfiguration { return &HTTPGetActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go index d55f36bfd..252637166 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HTTPHeaderApplyConfiguration represents an declarative configuration of the HTTPHeader type for use +// HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use // with apply. type HTTPHeaderApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// HTTPHeaderApplyConfiguration constructs an declarative configuration of the HTTPHeader type for use with +// HTTPHeaderApplyConfiguration constructs a declarative configuration of the HTTPHeader type for use with // apply. func HTTPHeader() *HTTPHeaderApplyConfiguration { return &HTTPHeaderApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go new file mode 100644 index 000000000..340f15040 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use +// with apply. +type ImageVolumeSourceApplyConfiguration struct { + Reference *string `json:"reference,omitempty"` + PullPolicy *v1.PullPolicy `json:"pullPolicy,omitempty"` +} + +// ImageVolumeSourceApplyConfiguration constructs a declarative configuration of the ImageVolumeSource type for use with +// apply. +func ImageVolumeSource() *ImageVolumeSourceApplyConfiguration { + return &ImageVolumeSourceApplyConfiguration{} +} + +// WithReference sets the Reference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reference field is set to the value of the last call. 
+func (b *ImageVolumeSourceApplyConfiguration) WithReference(value string) *ImageVolumeSourceApplyConfiguration { + b.Reference = &value + return b +} + +// WithPullPolicy sets the PullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullPolicy field is set to the value of the last call. +func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value v1.PullPolicy) *ImageVolumeSourceApplyConfiguration { + b.PullPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go index c7b248181..42f420c56 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ISCSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIPersistentVolumeSource type for use +// ISCSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIPersistentVolumeSource type for use // with apply. type ISCSIPersistentVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIPersistentVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIPersistentVolumeSource type for use with +// ISCSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIPersistentVolumeSource type for use with // apply. func ISCSIPersistentVolumeSource() *ISCSIPersistentVolumeSourceApplyConfiguration { return &ISCSIPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go index c95941a9c..61055434b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ISCSIVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIVolumeSource type for use +// ISCSIVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIVolumeSource type for use // with apply. type ISCSIVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIVolumeSource type for use with +// ISCSIVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIVolumeSource type for use with // apply. 
func ISCSIVolumeSource() *ISCSIVolumeSourceApplyConfiguration { return &ISCSIVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go index d58676d34..c961b0795 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// KeyToPathApplyConfiguration represents an declarative configuration of the KeyToPath type for use +// KeyToPathApplyConfiguration represents a declarative configuration of the KeyToPath type for use // with apply. type KeyToPathApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -26,7 +26,7 @@ type KeyToPathApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// KeyToPathApplyConfiguration constructs an declarative configuration of the KeyToPath type for use with +// KeyToPathApplyConfiguration constructs a declarative configuration of the KeyToPath type for use with // apply. func KeyToPath() *KeyToPathApplyConfiguration { return &KeyToPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go index db9abf8af..e37a30f59 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LifecycleApplyConfiguration represents an declarative configuration of the Lifecycle type for use +// LifecycleApplyConfiguration represents a declarative configuration of the Lifecycle type for use // with apply. type LifecycleApplyConfiguration struct { PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"` PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"` } -// LifecycleApplyConfiguration constructs an declarative configuration of the Lifecycle type for use with +// LifecycleApplyConfiguration constructs a declarative configuration of the Lifecycle type for use with // apply. func Lifecycle() *LifecycleApplyConfiguration { return &LifecycleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index 6e373dd4e..b7c706d58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -18,15 +18,16 @@ limitations under the License. package v1 -// LifecycleHandlerApplyConfiguration represents an declarative configuration of the LifecycleHandler type for use +// LifecycleHandlerApplyConfiguration represents a declarative configuration of the LifecycleHandler type for use // with apply. type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` + Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } -// LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with +// LifecycleHandlerApplyConfiguration constructs a declarative configuration of the LifecycleHandler type for use with // apply. 
func LifecycleHandler() *LifecycleHandlerApplyConfiguration { return &LifecycleHandlerApplyConfiguration{} @@ -55,3 +56,11 @@ func (b *LifecycleHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActio b.TCPSocket = value return b } + +// WithSleep sets the Sleep field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Sleep field is set to the value of the last call. +func (b *LifecycleHandlerApplyConfiguration) WithSleep(value *SleepActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { + b.Sleep = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go index 03207b8ec..7770200a0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LimitRangeApplyConfiguration represents an declarative configuration of the LimitRange type for use +// LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use // with apply. type LimitRangeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LimitRangeApplyConfiguration struct { Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"` } -// LimitRange constructs an declarative configuration of the LimitRange type for use with +// LimitRange constructs a declarative configuration of the LimitRange type for use with // apply. func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { b := &LimitRangeApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *LimitRangeApplyConfiguration) WithNamespace(value string) *LimitRangeAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *LimitRangeApplyConfiguration) WithSelfLink(value string) *LimitRangeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *LimitRangeApplyConfiguration) WithFinalizers(values ...string) *LimitRa return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *LimitRangeApplyConfiguration) WithClusterName(value string) *LimitRangeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *LimitRangeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *LimitRangeApplyConfiguration) WithSpec(value *LimitRangeSpecApplyConfig b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LimitRangeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go index 084650fda..61d8344e8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// LimitRangeItemApplyConfiguration represents an declarative configuration of the LimitRangeItem type for use +// LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use // with apply. type LimitRangeItemApplyConfiguration struct { Type *v1.LimitType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type LimitRangeItemApplyConfiguration struct { MaxLimitRequestRatio *v1.ResourceList `json:"maxLimitRequestRatio,omitempty"` } -// LimitRangeItemApplyConfiguration constructs an declarative configuration of the LimitRangeItem type for use with +// LimitRangeItemApplyConfiguration constructs a declarative configuration of the LimitRangeItem type for use with // apply. func LimitRangeItem() *LimitRangeItemApplyConfiguration { return &LimitRangeItemApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go index 5eee5c498..8d69c1c0c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LimitRangeSpecApplyConfiguration represents an declarative configuration of the LimitRangeSpec type for use +// LimitRangeSpecApplyConfiguration represents a declarative configuration of the LimitRangeSpec type for use // with apply. type LimitRangeSpecApplyConfiguration struct { Limits []LimitRangeItemApplyConfiguration `json:"limits,omitempty"` } -// LimitRangeSpecApplyConfiguration constructs an declarative configuration of the LimitRangeSpec type for use with +// LimitRangeSpecApplyConfiguration constructs a declarative configuration of the LimitRangeSpec type for use with // apply. func LimitRangeSpec() *LimitRangeSpecApplyConfiguration { return &LimitRangeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go new file mode 100644 index 000000000..fbab4815a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LinuxContainerUserApplyConfiguration represents a declarative configuration of the LinuxContainerUser type for use +// with apply. +type LinuxContainerUserApplyConfiguration struct { + UID *int64 `json:"uid,omitempty"` + GID *int64 `json:"gid,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` +} + +// LinuxContainerUserApplyConfiguration constructs a declarative configuration of the LinuxContainerUser type for use with +// apply. +func LinuxContainerUser() *LinuxContainerUserApplyConfiguration { + return &LinuxContainerUserApplyConfiguration{} +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *LinuxContainerUserApplyConfiguration) WithUID(value int64) *LinuxContainerUserApplyConfiguration { + b.UID = &value + return b +} + +// WithGID sets the GID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GID field is set to the value of the last call. +func (b *LinuxContainerUserApplyConfiguration) WithGID(value int64) *LinuxContainerUserApplyConfiguration { + b.GID = &value + return b +} + +// WithSupplementalGroups adds the given value to the SupplementalGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SupplementalGroups field. +func (b *LinuxContainerUserApplyConfiguration) WithSupplementalGroups(values ...int64) *LinuxContainerUserApplyConfiguration { + for i := range values { + b.SupplementalGroups = append(b.SupplementalGroups, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index 64d27bdad..1a7d99815 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -18,15 +18,20 @@ limitations under the License. package v1 -// LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use +import ( + v1 "k8s.io/api/core/v1" +) + +// LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use // with apply. 
type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostname *string `json:"hostname,omitempty"` + IPMode *v1.LoadBalancerIPMode `json:"ipMode,omitempty"` Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } -// LoadBalancerIngressApplyConfiguration constructs an declarative configuration of the LoadBalancerIngress type for use with +// LoadBalancerIngressApplyConfiguration constructs a declarative configuration of the LoadBalancerIngress type for use with // apply. func LoadBalancerIngress() *LoadBalancerIngressApplyConfiguration { return &LoadBalancerIngressApplyConfiguration{} @@ -48,6 +53,14 @@ func (b *LoadBalancerIngressApplyConfiguration) WithHostname(value string) *Load return b } +// WithIPMode sets the IPMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPMode field is set to the value of the last call. +func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value v1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { + b.IPMode = &value + return b +} + // WithPorts adds the given value to the Ports field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Ports field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go index 2fcc0cad1..bb3d616c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LoadBalancerStatusApplyConfiguration represents an declarative configuration of the LoadBalancerStatus type for use +// LoadBalancerStatusApplyConfiguration represents a declarative configuration of the LoadBalancerStatus type for use // with apply. type LoadBalancerStatusApplyConfiguration struct { Ingress []LoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// LoadBalancerStatusApplyConfiguration constructs an declarative configuration of the LoadBalancerStatus type for use with +// LoadBalancerStatusApplyConfiguration constructs a declarative configuration of the LoadBalancerStatus type for use with // apply. func LoadBalancerStatus() *LoadBalancerStatusApplyConfiguration { return &LoadBalancerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go index 7662e32b3..c55d6803d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LocalObjectReferenceApplyConfiguration represents an declarative configuration of the LocalObjectReference type for use +// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use // with apply. 
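Side note on the new LoadBalancerIngress.IPMode field and WithIPMode method above: a minimal usage sketch (not part of this diff), assuming the corev1.LoadBalancerIPModeVIP constant and a placeholder address:

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// lbIngressWithIPMode expresses a Service load-balancer ingress with the new ipMode field.
func lbIngressWithIPMode() *applycorev1.LoadBalancerIngressApplyConfiguration {
	return applycorev1.LoadBalancerIngress().
		WithIP("203.0.113.10").
		WithIPMode(corev1.LoadBalancerIPModeVIP)
}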
type LocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// LocalObjectReferenceApplyConfiguration constructs an declarative configuration of the LocalObjectReference type for use with +// LocalObjectReferenceApplyConfiguration constructs a declarative configuration of the LocalObjectReference type for use with // apply. func LocalObjectReference() *LocalObjectReferenceApplyConfiguration { return &LocalObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go index 5d289bd12..db711d993 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LocalVolumeSourceApplyConfiguration represents an declarative configuration of the LocalVolumeSource type for use +// LocalVolumeSourceApplyConfiguration represents a declarative configuration of the LocalVolumeSource type for use // with apply. type LocalVolumeSourceApplyConfiguration struct { Path *string `json:"path,omitempty"` FSType *string `json:"fsType,omitempty"` } -// LocalVolumeSourceApplyConfiguration constructs an declarative configuration of the LocalVolumeSource type for use with +// LocalVolumeSourceApplyConfiguration constructs a declarative configuration of the LocalVolumeSource type for use with // apply. func LocalVolumeSource() *LocalVolumeSourceApplyConfiguration { return &LocalVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go new file mode 100644 index 000000000..704c32165 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use +// with apply. +type ModifyVolumeStatusApplyConfiguration struct { + TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` + Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` +} + +// ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with +// apply. +func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration { + return &ModifyVolumeStatusApplyConfiguration{} +} + +// WithTargetVolumeAttributesClassName sets the TargetVolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the TargetVolumeAttributesClassName field is set to the value of the last call. +func (b *ModifyVolumeStatusApplyConfiguration) WithTargetVolumeAttributesClassName(value string) *ModifyVolumeStatusApplyConfiguration { + b.TargetVolumeAttributesClassName = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value v1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go index ec29bcfd9..0b77af183 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NamespaceApplyConfiguration represents an declarative configuration of the Namespace type for use +// NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use // with apply. type NamespaceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type NamespaceApplyConfiguration struct { Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"` } -// Namespace constructs an declarative configuration of the Namespace type for use with +// Namespace constructs a declarative configuration of the Namespace type for use with // apply. func Namespace(name string) *NamespaceApplyConfiguration { b := &NamespaceApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *NamespaceApplyConfiguration) WithNamespace(value string) *NamespaceAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *NamespaceApplyConfiguration) WithSelfLink(value string) *NamespaceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *NamespaceApplyConfiguration) WithFinalizers(values ...string) *Namespac return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *NamespaceApplyConfiguration) WithClusterName(value string) *NamespaceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *NamespaceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *NamespaceApplyConfiguration) WithStatus(value *NamespaceStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NamespaceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go index 8651978b0..9784c3e6f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NamespaceConditionApplyConfiguration represents an declarative configuration of the NamespaceCondition type for use +// NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use // with apply. type NamespaceConditionApplyConfiguration struct { Type *v1.NamespaceConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type NamespaceConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// NamespaceConditionApplyConfiguration constructs an declarative configuration of the NamespaceCondition type for use with +// NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with // apply. func NamespaceCondition() *NamespaceConditionApplyConfiguration { return &NamespaceConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go index 9bc02d1fa..6d7b7f1f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// NamespaceSpecApplyConfiguration represents an declarative configuration of the NamespaceSpec type for use +// NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use // with apply. type NamespaceSpecApplyConfiguration struct { Finalizers []v1.FinalizerName `json:"finalizers,omitempty"` } -// NamespaceSpecApplyConfiguration constructs an declarative configuration of the NamespaceSpec type for use with +// NamespaceSpecApplyConfiguration constructs a declarative configuration of the NamespaceSpec type for use with // apply. 
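Side note on the pattern above, which recurs for every top-level type in this vendor bump: the generated builders drop WithSelfLink and WithClusterName (removed from ObjectMeta upstream) and gain a GetName accessor. A minimal sketch (not part of this diff) of the new accessor, assuming the standard generated WithLabels setter and a placeholder namespace name:

package main

import (
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Build a Namespace apply configuration and read its name back via GetName.
	ns := applycorev1.Namespace("opa").
		WithLabels(map[string]string{"app": "kube-mgmt"})
	if name := ns.GetName(); name != nil {
		fmt.Println("applying namespace", *name) // prints "applying namespace opa"
	}
}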
func NamespaceSpec() *NamespaceSpecApplyConfiguration { return &NamespaceSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go index d950fd316..314908109 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// NamespaceStatusApplyConfiguration represents an declarative configuration of the NamespaceStatus type for use +// NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use // with apply. type NamespaceStatusApplyConfiguration struct { Phase *v1.NamespacePhase `json:"phase,omitempty"` Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"` } -// NamespaceStatusApplyConfiguration constructs an declarative configuration of the NamespaceStatus type for use with +// NamespaceStatusApplyConfiguration constructs a declarative configuration of the NamespaceStatus type for use with // apply. func NamespaceStatus() *NamespaceStatusApplyConfiguration { return &NamespaceStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go index cb300ee81..ed49a87a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NFSVolumeSourceApplyConfiguration represents an declarative configuration of the NFSVolumeSource type for use +// NFSVolumeSourceApplyConfiguration represents a declarative configuration of the NFSVolumeSource type for use // with apply. type NFSVolumeSourceApplyConfiguration struct { Server *string `json:"server,omitempty"` @@ -26,7 +26,7 @@ type NFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// NFSVolumeSourceApplyConfiguration constructs an declarative configuration of the NFSVolumeSource type for use with +// NFSVolumeSourceApplyConfiguration constructs a declarative configuration of the NFSVolumeSource type for use with // apply. func NFSVolumeSource() *NFSVolumeSourceApplyConfiguration { return &NFSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go index b26e9f499..ef1339259 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NodeApplyConfiguration represents an declarative configuration of the Node type for use +// NodeApplyConfiguration represents a declarative configuration of the Node type for use // with apply. type NodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type NodeApplyConfiguration struct { Status *NodeStatusApplyConfiguration `json:"status,omitempty"` } -// Node constructs an declarative configuration of the Node type for use with +// Node constructs a declarative configuration of the Node type for use with // apply. 
func Node(name string) *NodeApplyConfiguration { b := &NodeApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfigura return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithSelfLink(value string) *NodeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *NodeApplyConfiguration) WithFinalizers(values ...string) *NodeApplyConf return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithClusterName(value string) *NodeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *NodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go index a1d4fbe04..a9cb036c5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeAddressApplyConfiguration represents an declarative configuration of the NodeAddress type for use +// NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use // with apply. type NodeAddressApplyConfiguration struct { Type *v1.NodeAddressType `json:"type,omitempty"` Address *string `json:"address,omitempty"` } -// NodeAddressApplyConfiguration constructs an declarative configuration of the NodeAddress type for use with +// NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with // apply. func NodeAddress() *NodeAddressApplyConfiguration { return &NodeAddressApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go index e28ced6e4..5d11d746d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// NodeAffinityApplyConfiguration represents an declarative configuration of the NodeAffinity type for use +// NodeAffinityApplyConfiguration represents a declarative configuration of the NodeAffinity type for use // with apply. type NodeAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution *NodeSelectorApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// NodeAffinityApplyConfiguration constructs an declarative configuration of the NodeAffinity type for use with +// NodeAffinityApplyConfiguration constructs a declarative configuration of the NodeAffinity type for use with // apply. func NodeAffinity() *NodeAffinityApplyConfiguration { return &NodeAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go index eb81ca543..a1b8ed0f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NodeConditionApplyConfiguration represents an declarative configuration of the NodeCondition type for use +// NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use // with apply. type NodeConditionApplyConfiguration struct { Type *v1.NodeConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type NodeConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// NodeConditionApplyConfiguration constructs an declarative configuration of the NodeCondition type for use with +// NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with // apply. func NodeCondition() *NodeConditionApplyConfiguration { return &NodeConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go index 60567aa43..00a671fc0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeConfigSourceApplyConfiguration represents an declarative configuration of the NodeConfigSource type for use +// NodeConfigSourceApplyConfiguration represents a declarative configuration of the NodeConfigSource type for use // with apply. type NodeConfigSourceApplyConfiguration struct { ConfigMap *ConfigMapNodeConfigSourceApplyConfiguration `json:"configMap,omitempty"` } -// NodeConfigSourceApplyConfiguration constructs an declarative configuration of the NodeConfigSource type for use with +// NodeConfigSourceApplyConfiguration constructs a declarative configuration of the NodeConfigSource type for use with // apply. 
func NodeConfigSource() *NodeConfigSourceApplyConfiguration { return &NodeConfigSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go index 71447fe9c..d5ccc45c6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeConfigStatusApplyConfiguration represents an declarative configuration of the NodeConfigStatus type for use +// NodeConfigStatusApplyConfiguration represents a declarative configuration of the NodeConfigStatus type for use // with apply. type NodeConfigStatusApplyConfiguration struct { Assigned *NodeConfigSourceApplyConfiguration `json:"assigned,omitempty"` @@ -27,7 +27,7 @@ type NodeConfigStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// NodeConfigStatusApplyConfiguration constructs an declarative configuration of the NodeConfigStatus type for use with +// NodeConfigStatusApplyConfiguration constructs a declarative configuration of the NodeConfigStatus type for use with // apply. func NodeConfigStatus() *NodeConfigStatusApplyConfiguration { return &NodeConfigStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go index 4cabc7f52..11228b369 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeDaemonEndpointsApplyConfiguration represents an declarative configuration of the NodeDaemonEndpoints type for use +// NodeDaemonEndpointsApplyConfiguration represents a declarative configuration of the NodeDaemonEndpoints type for use // with apply. type NodeDaemonEndpointsApplyConfiguration struct { KubeletEndpoint *DaemonEndpointApplyConfiguration `json:"kubeletEndpoint,omitempty"` } -// NodeDaemonEndpointsApplyConfiguration constructs an declarative configuration of the NodeDaemonEndpoints type for use with +// NodeDaemonEndpointsApplyConfiguration constructs a declarative configuration of the NodeDaemonEndpoints type for use with // apply. func NodeDaemonEndpoints() *NodeDaemonEndpointsApplyConfiguration { return &NodeDaemonEndpointsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go new file mode 100644 index 000000000..678b0e36d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// NodeFeaturesApplyConfiguration represents a declarative configuration of the NodeFeatures type for use +// with apply. +type NodeFeaturesApplyConfiguration struct { + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty"` +} + +// NodeFeaturesApplyConfiguration constructs a declarative configuration of the NodeFeatures type for use with +// apply. +func NodeFeatures() *NodeFeaturesApplyConfiguration { + return &NodeFeaturesApplyConfiguration{} +} + +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. +func (b *NodeFeaturesApplyConfiguration) WithSupplementalGroupsPolicy(value bool) *NodeFeaturesApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go new file mode 100644 index 000000000..c7c664974 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeRuntimeHandlerApplyConfiguration represents a declarative configuration of the NodeRuntimeHandler type for use +// with apply. +type NodeRuntimeHandlerApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Features *NodeRuntimeHandlerFeaturesApplyConfiguration `json:"features,omitempty"` +} + +// NodeRuntimeHandlerApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandler type for use with +// apply. +func NodeRuntimeHandler() *NodeRuntimeHandlerApplyConfiguration { + return &NodeRuntimeHandlerApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *NodeRuntimeHandlerApplyConfiguration) WithName(value string) *NodeRuntimeHandlerApplyConfiguration { + b.Name = &value + return b +} + +// WithFeatures sets the Features field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Features field is set to the value of the last call. 
+func (b *NodeRuntimeHandlerApplyConfiguration) WithFeatures(value *NodeRuntimeHandlerFeaturesApplyConfiguration) *NodeRuntimeHandlerApplyConfiguration { + b.Features = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go new file mode 100644 index 000000000..a295b6096 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeRuntimeHandlerFeaturesApplyConfiguration represents a declarative configuration of the NodeRuntimeHandlerFeatures type for use +// with apply. +type NodeRuntimeHandlerFeaturesApplyConfiguration struct { + RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty"` + UserNamespaces *bool `json:"userNamespaces,omitempty"` +} + +// NodeRuntimeHandlerFeaturesApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandlerFeatures type for use with +// apply. +func NodeRuntimeHandlerFeatures() *NodeRuntimeHandlerFeaturesApplyConfiguration { + return &NodeRuntimeHandlerFeaturesApplyConfiguration{} +} + +// WithRecursiveReadOnlyMounts sets the RecursiveReadOnlyMounts field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RecursiveReadOnlyMounts field is set to the value of the last call. +func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithRecursiveReadOnlyMounts(value bool) *NodeRuntimeHandlerFeaturesApplyConfiguration { + b.RecursiveReadOnlyMounts = &value + return b +} + +// WithUserNamespaces sets the UserNamespaces field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserNamespaces field is set to the value of the last call. +func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithUserNamespaces(value bool) *NodeRuntimeHandlerFeaturesApplyConfiguration { + b.UserNamespaces = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go index 5489097f5..6eab10979 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeSelectorApplyConfiguration represents an declarative configuration of the NodeSelector type for use +// NodeSelectorApplyConfiguration represents a declarative configuration of the NodeSelector type for use // with apply. 
type NodeSelectorApplyConfiguration struct { NodeSelectorTerms []NodeSelectorTermApplyConfiguration `json:"nodeSelectorTerms,omitempty"` } -// NodeSelectorApplyConfiguration constructs an declarative configuration of the NodeSelector type for use with +// NodeSelectorApplyConfiguration constructs a declarative configuration of the NodeSelector type for use with // apply. func NodeSelector() *NodeSelectorApplyConfiguration { return &NodeSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go index a6e43e607..7c383e06c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeSelectorRequirementApplyConfiguration represents an declarative configuration of the NodeSelectorRequirement type for use +// NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use // with apply. type NodeSelectorRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -30,7 +30,7 @@ type NodeSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// NodeSelectorRequirementApplyConfiguration constructs an declarative configuration of the NodeSelectorRequirement type for use with +// NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with // apply. func NodeSelectorRequirement() *NodeSelectorRequirementApplyConfiguration { return &NodeSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go index 13b3ddbc1..9d0d780f3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeSelectorTermApplyConfiguration represents an declarative configuration of the NodeSelectorTerm type for use +// NodeSelectorTermApplyConfiguration represents a declarative configuration of the NodeSelectorTerm type for use // with apply. type NodeSelectorTermApplyConfiguration struct { MatchExpressions []NodeSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` MatchFields []NodeSelectorRequirementApplyConfiguration `json:"matchFields,omitempty"` } -// NodeSelectorTermApplyConfiguration constructs an declarative configuration of the NodeSelectorTerm type for use with +// NodeSelectorTermApplyConfiguration constructs a declarative configuration of the NodeSelectorTerm type for use with // apply. func NodeSelectorTerm() *NodeSelectorTermApplyConfiguration { return &NodeSelectorTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go index 63b61078d..8ac349712 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// NodeSpecApplyConfiguration represents an declarative configuration of the NodeSpec type for use +// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. type NodeSpecApplyConfiguration struct { PodCIDR *string `json:"podCIDR,omitempty"` @@ -30,7 +30,7 @@ type NodeSpecApplyConfiguration struct { DoNotUseExternalID *string `json:"externalID,omitempty"` } -// NodeSpecApplyConfiguration constructs an declarative configuration of the NodeSpec type for use with +// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with // apply. func NodeSpec() *NodeSpecApplyConfiguration { return &NodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go index aa3603f4f..8411c57ac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// NodeStatusApplyConfiguration represents an declarative configuration of the NodeStatus type for use +// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use // with apply. type NodeStatusApplyConfiguration struct { Capacity *v1.ResourceList `json:"capacity,omitempty"` @@ -36,9 +36,11 @@ type NodeStatusApplyConfiguration struct { VolumesInUse []v1.UniqueVolumeName `json:"volumesInUse,omitempty"` VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"` Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"` + RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"` + Features *NodeFeaturesApplyConfiguration `json:"features,omitempty"` } -// NodeStatusApplyConfiguration constructs an declarative configuration of the NodeStatus type for use with +// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with // apply. func NodeStatus() *NodeStatusApplyConfiguration { return &NodeStatusApplyConfiguration{} @@ -153,3 +155,24 @@ func (b *NodeStatusApplyConfiguration) WithConfig(value *NodeConfigStatusApplyCo b.Config = value return b } + +// WithRuntimeHandlers adds the given value to the RuntimeHandlers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RuntimeHandlers field. +func (b *NodeStatusApplyConfiguration) WithRuntimeHandlers(values ...*NodeRuntimeHandlerApplyConfiguration) *NodeStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRuntimeHandlers") + } + b.RuntimeHandlers = append(b.RuntimeHandlers, *values[i]) + } + return b +} + +// WithFeatures sets the Features field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Features field is set to the value of the last call. 
+func (b *NodeStatusApplyConfiguration) WithFeatures(value *NodeFeaturesApplyConfiguration) *NodeStatusApplyConfiguration { + b.Features = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go index 2634ea984..11ac50713 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeSystemInfoApplyConfiguration represents an declarative configuration of the NodeSystemInfo type for use +// NodeSystemInfoApplyConfiguration represents a declarative configuration of the NodeSystemInfo type for use // with apply. type NodeSystemInfoApplyConfiguration struct { MachineID *string `json:"machineID,omitempty"` @@ -33,7 +33,7 @@ type NodeSystemInfoApplyConfiguration struct { Architecture *string `json:"architecture,omitempty"` } -// NodeSystemInfoApplyConfiguration constructs an declarative configuration of the NodeSystemInfo type for use with +// NodeSystemInfoApplyConfiguration constructs a declarative configuration of the NodeSystemInfo type for use with // apply. func NodeSystemInfo() *NodeSystemInfoApplyConfiguration { return &NodeSystemInfoApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go index 0c2402b3c..c129c998b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ObjectFieldSelectorApplyConfiguration represents an declarative configuration of the ObjectFieldSelector type for use +// ObjectFieldSelectorApplyConfiguration represents a declarative configuration of the ObjectFieldSelector type for use // with apply. type ObjectFieldSelectorApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectFieldSelectorApplyConfiguration constructs an declarative configuration of the ObjectFieldSelector type for use with +// ObjectFieldSelectorApplyConfiguration constructs a declarative configuration of the ObjectFieldSelector type for use with // apply. func ObjectFieldSelector() *ObjectFieldSelectorApplyConfiguration { return &ObjectFieldSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go index 667fa84a8..4cd3f226e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use // with apply. 
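Side note on the new NodeStatus.RuntimeHandlers and NodeStatus.Features fields wired up above: a minimal sketch (not part of this diff) combining the new NodeRuntimeHandler, NodeRuntimeHandlerFeatures, and NodeFeatures builders; the handler name "runc" is a placeholder, and in practice the kubelet populates these status fields:

package sketch

import (
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// nodeStatusSketch shows status.runtimeHandlers and status.features expressed via the builders.
func nodeStatusSketch() *applycorev1.NodeStatusApplyConfiguration {
	return applycorev1.NodeStatus().
		WithRuntimeHandlers(applycorev1.NodeRuntimeHandler().
			WithName("runc").
			WithFeatures(applycorev1.NodeRuntimeHandlerFeatures().
				WithRecursiveReadOnlyMounts(true))).
		WithFeatures(applycorev1.NodeFeatures().WithSupplementalGroupsPolicy(true))
}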
type ObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -34,7 +34,7 @@ type ObjectReferenceApplyConfiguration struct { FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with // apply. func ObjectReference() *ObjectReferenceApplyConfiguration { return &ObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go index dcef6020f..020f87411 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeApplyConfiguration represents an declarative configuration of the PersistentVolume type for use +// PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use // with apply. type PersistentVolumeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PersistentVolumeApplyConfiguration struct { Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolume constructs an declarative configuration of the PersistentVolume type for use with +// PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with // apply. func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { b := &PersistentVolumeApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *PersistentVolumeApplyConfiguration) WithNamespace(value string) *Persis return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PersistentVolumeApplyConfiguration) WithSelfLink(value string) *PersistentVolumeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *PersistentVolumeApplyConfiguration) WithFinalizers(values ...string) *P return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *PersistentVolumeApplyConfiguration) WithClusterName(value string) *PersistentVolumeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PersistentVolumeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *PersistentVolumeApplyConfiguration) WithStatus(value *PersistentVolumeS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go index 8ed20fa29..81cf79144 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimApplyConfiguration represents an declarative configuration of the PersistentVolumeClaim type for use +// PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use // with apply. type PersistentVolumeClaimApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PersistentVolumeClaimApplyConfiguration struct { Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolumeClaim constructs an declarative configuration of the PersistentVolumeClaim type for use with +// PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with // apply. func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyConfiguration { b := &PersistentVolumeClaimApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithNamespace(value string) *P return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PersistentVolumeClaimApplyConfiguration) WithSelfLink(value string) *PersistentVolumeClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithFinalizers(values ...strin return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *PersistentVolumeClaimApplyConfiguration) WithClusterName(value string) *PersistentVolumeClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PersistentVolumeClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithStatus(value *PersistentVo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go index 65449e92e..80038c067 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeClaimConditionApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimCondition type for use +// PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use // with apply. type PersistentVolumeClaimConditionApplyConfiguration struct { Type *v1.PersistentVolumeClaimConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type PersistentVolumeClaimConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PersistentVolumeClaimConditionApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimCondition type for use with +// PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with // apply. func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfiguration { return &PersistentVolumeClaimConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index e22d04b0d..5ce671cd9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -23,20 +23,21 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use +// PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. 
type PersistentVolumeClaimSpecApplyConfiguration struct { - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeName *string `json:"volumeName,omitempty"` - StorageClassName *string `json:"storageClassName,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` - DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` - DataSourceRef *TypedLocalObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeName *string `json:"volumeName,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` + DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with +// PersistentVolumeClaimSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimSpec type for use with // apply. func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration { return &PersistentVolumeClaimSpecApplyConfiguration{} @@ -63,7 +64,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithSelector(value *metav1 // WithResources sets the Resources field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resources field is set to the value of the last call. -func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *VolumeResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { b.Resources = value return b } @@ -103,7 +104,15 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSource(value *Type // WithDataSourceRef sets the DataSourceRef field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DataSourceRef field is set to the value of the last call. -func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *TypedLocalObjectReferenceApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *TypedObjectReferenceApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { b.DataSourceRef = value return b } + +// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeClaimSpecApplyConfiguration { + b.VolumeAttributesClassName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 4c38d89f5..3eebf95ad 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -22,18 +22,20 @@ import ( v1 "k8s.io/api/core/v1" ) -// PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use +// PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` - AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` - ResizeStatus *v1.PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty"` + Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *v1.ResourceList `json:"capacity,omitempty"` + Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` + AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` + CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"` + ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` } -// PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with +// PersistentVolumeClaimStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimStatus type for use with // apply. func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguration { return &PersistentVolumeClaimStatusApplyConfiguration{} @@ -86,10 +88,32 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(v return b } -// WithResizeStatus sets the ResizeStatus field in the declarative configuration to the given value +// WithAllocatedResourceStatuses puts the entries into the AllocatedResourceStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the AllocatedResourceStatuses field, +// overwriting an existing map entries in AllocatedResourceStatuses field with the same key. 
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[v1.ResourceName]v1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration { + if b.AllocatedResourceStatuses == nil && len(entries) > 0 { + b.AllocatedResourceStatuses = make(map[v1.ResourceName]v1.ClaimResourceStatus, len(entries)) + } + for k, v := range entries { + b.AllocatedResourceStatuses[k] = v + } + return b +} + +// WithCurrentVolumeAttributesClassName sets the CurrentVolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CurrentVolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCurrentVolumeAttributesClassName(value string) *PersistentVolumeClaimStatusApplyConfiguration { + b.CurrentVolumeAttributesClassName = &value + return b +} + +// WithModifyVolumeStatus sets the ModifyVolumeStatus field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResizeStatus field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithResizeStatus(value v1.PersistentVolumeClaimResizeStatus) *PersistentVolumeClaimStatusApplyConfiguration { - b.ResizeStatus = &value +// If called multiple times, the ModifyVolumeStatus field is set to the value of the last call. +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithModifyVolumeStatus(value *ModifyVolumeStatusApplyConfiguration) *PersistentVolumeClaimStatusApplyConfiguration { + b.ModifyVolumeStatus = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go index ac1b6bf01..ed4970291 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimTemplateApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimTemplate type for use +// PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use // with apply. type PersistentVolumeClaimTemplateApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` } -// PersistentVolumeClaimTemplateApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimTemplate type for use with +// PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with // apply. func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfiguration { return &PersistentVolumeClaimTemplateApplyConfiguration{} @@ -64,15 +64,6 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithNamespace(value st return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSelfLink(value string) *PersistentVolumeClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -182,15 +173,6 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithFinalizers(values return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithClusterName(value string) *PersistentVolumeClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PersistentVolumeClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -204,3 +186,9 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSpec(value *Persis b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go index a498fa6a5..ccccdfb49 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PersistentVolumeClaimVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimVolumeSource type for use +// PersistentVolumeClaimVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimVolumeSource type for use // with apply. type PersistentVolumeClaimVolumeSourceApplyConfiguration struct { ClaimName *string `json:"claimName,omitempty"` ReadOnly *bool `json:"readOnly,omitempty"` } -// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimVolumeSource type for use with +// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimVolumeSource type for use with // apply. 
func PersistentVolumeClaimVolumeSource() *PersistentVolumeClaimVolumeSourceApplyConfiguration { return &PersistentVolumeClaimVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go index 0576e7dd3..aba012462 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PersistentVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeSource type for use +// PersistentVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeSource type for use // with apply. type PersistentVolumeSourceApplyConfiguration struct { GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"` @@ -45,7 +45,7 @@ type PersistentVolumeSourceApplyConfiguration struct { CSI *CSIPersistentVolumeSourceApplyConfiguration `json:"csi,omitempty"` } -// PersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeSource type for use with +// PersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeSource type for use with // apply. func PersistentVolumeSource() *PersistentVolumeSourceApplyConfiguration { return &PersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index b3a72b1c3..074fa55d1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PersistentVolumeSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeSpec type for use +// PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use // with apply. type PersistentVolumeSpecApplyConfiguration struct { Capacity *v1.ResourceList `json:"capacity,omitempty"` @@ -34,9 +34,10 @@ type PersistentVolumeSpecApplyConfiguration struct { MountOptions []string `json:"mountOptions,omitempty"` VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` + VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with +// PersistentVolumeSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeSpec type for use with // apply. func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration { return &PersistentVolumeSpecApplyConfiguration{} @@ -285,3 +286,11 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithNodeAffinity(value *VolumeN b.NodeAffinity = value return b } + +// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. +func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeSpecApplyConfiguration { + b.VolumeAttributesClassName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go index f7048dec4..95ba90f48 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go @@ -20,17 +20,19 @@ package v1 import ( v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeStatus type for use +// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use // with apply. type PersistentVolumeStatusApplyConfiguration struct { - Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` - Message *string `json:"message,omitempty"` - Reason *string `json:"reason,omitempty"` + Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` + Message *string `json:"message,omitempty"` + Reason *string `json:"reason,omitempty"` + LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` } -// PersistentVolumeStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeStatus type for use with +// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with // apply. func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration { return &PersistentVolumeStatusApplyConfiguration{} @@ -59,3 +61,11 @@ func (b *PersistentVolumeStatusApplyConfiguration) WithReason(value string) *Per b.Reason = &value return b } + +// WithLastPhaseTransitionTime sets the LastPhaseTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastPhaseTransitionTime field is set to the value of the last call. +func (b *PersistentVolumeStatusApplyConfiguration) WithLastPhaseTransitionTime(value metav1.Time) *PersistentVolumeStatusApplyConfiguration { + b.LastPhaseTransitionTime = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go index 43587d676..d8dc103e2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PhotonPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the PhotonPersistentDiskVolumeSource type for use +// PhotonPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the PhotonPersistentDiskVolumeSource type for use // with apply. 
type PhotonPersistentDiskVolumeSourceApplyConfiguration struct { PdID *string `json:"pdID,omitempty"` FSType *string `json:"fsType,omitempty"` } -// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the PhotonPersistentDiskVolumeSource type for use with +// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the PhotonPersistentDiskVolumeSource type for use with // apply. func PhotonPersistentDiskVolumeSource() *PhotonPersistentDiskVolumeSourceApplyConfiguration { return &PhotonPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go index c3649829a..507d57d6f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodApplyConfiguration represents an declarative configuration of the Pod type for use +// PodApplyConfiguration represents a declarative configuration of the Pod type for use // with apply. type PodApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodApplyConfiguration struct { Status *PodStatusApplyConfiguration `json:"status,omitempty"` } -// Pod constructs an declarative configuration of the Pod type for use with +// Pod constructs a declarative configuration of the Pod type for use with // apply. func Pod(name, namespace string) *PodApplyConfiguration { b := &PodApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *PodApplyConfiguration) WithNamespace(value string) *PodApplyConfigurati return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PodApplyConfiguration) WithSelfLink(value string) *PodApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *PodApplyConfiguration) WithFinalizers(values ...string) *PodApplyConfig return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PodApplyConfiguration) WithClusterName(value string) *PodApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PodApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *PodApplyConfiguration) WithStatus(value *PodStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PodApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go index 7049c6212..23fed9546 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodAffinityApplyConfiguration represents an declarative configuration of the PodAffinity type for use +// PodAffinityApplyConfiguration represents a declarative configuration of the PodAffinity type for use // with apply. type PodAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAffinityApplyConfiguration constructs an declarative configuration of the PodAffinity type for use with +// PodAffinityApplyConfiguration constructs a declarative configuration of the PodAffinity type for use with // apply. func PodAffinity() *PodAffinityApplyConfiguration { return &PodAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index 7d2492203..3afce026d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -22,16 +22,18 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodAffinityTermApplyConfiguration represents an declarative configuration of the PodAffinityTerm type for use +// PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use // with apply. type PodAffinityTermApplyConfiguration struct { LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` Namespaces []string `json:"namespaces,omitempty"` TopologyKey *string `json:"topologyKey,omitempty"` NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } -// PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with +// PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with // apply. func PodAffinityTerm() *PodAffinityTermApplyConfiguration { return &PodAffinityTermApplyConfiguration{} @@ -70,3 +72,23 @@ func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.Labe b.NamespaceSelector = value return b } + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. 
+func (b *PodAffinityTermApplyConfiguration) WithMatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) + } + return b +} + +// WithMismatchLabelKeys adds the given value to the MismatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MismatchLabelKeys field. +func (b *PodAffinityTermApplyConfiguration) WithMismatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { + for i := range values { + b.MismatchLabelKeys = append(b.MismatchLabelKeys, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go index 42681c54c..ae9848963 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodAntiAffinityApplyConfiguration represents an declarative configuration of the PodAntiAffinity type for use +// PodAntiAffinityApplyConfiguration represents a declarative configuration of the PodAntiAffinity type for use // with apply. type PodAntiAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAntiAffinityApplyConfiguration constructs an declarative configuration of the PodAntiAffinity type for use with +// PodAntiAffinityApplyConfiguration constructs a declarative configuration of the PodAntiAffinity type for use with // apply. func PodAntiAffinity() *PodAntiAffinityApplyConfiguration { return &PodAntiAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go index 610209f3c..98968d26d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodConditionApplyConfiguration represents an declarative configuration of the PodCondition type for use +// PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use // with apply. type PodConditionApplyConfiguration struct { Type *v1.PodConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type PodConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PodConditionApplyConfiguration constructs an declarative configuration of the PodCondition type for use with +// PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with // apply. 
func PodCondition() *PodConditionApplyConfiguration { return &PodConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go index 0fe6a0834..2e0ce9a91 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PodDNSConfigApplyConfiguration represents an declarative configuration of the PodDNSConfig type for use +// PodDNSConfigApplyConfiguration represents a declarative configuration of the PodDNSConfig type for use // with apply. type PodDNSConfigApplyConfiguration struct { Nameservers []string `json:"nameservers,omitempty"` @@ -26,7 +26,7 @@ type PodDNSConfigApplyConfiguration struct { Options []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"` } -// PodDNSConfigApplyConfiguration constructs an declarative configuration of the PodDNSConfig type for use with +// PodDNSConfigApplyConfiguration constructs a declarative configuration of the PodDNSConfig type for use with // apply. func PodDNSConfig() *PodDNSConfigApplyConfiguration { return &PodDNSConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go index 327bf803b..458b333bf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodDNSConfigOptionApplyConfiguration represents an declarative configuration of the PodDNSConfigOption type for use +// PodDNSConfigOptionApplyConfiguration represents a declarative configuration of the PodDNSConfigOption type for use // with apply. type PodDNSConfigOptionApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// PodDNSConfigOptionApplyConfiguration constructs an declarative configuration of the PodDNSConfigOption type for use with +// PodDNSConfigOptionApplyConfiguration constructs a declarative configuration of the PodDNSConfigOption type for use with // apply. func PodDNSConfigOption() *PodDNSConfigOptionApplyConfiguration { return &PodDNSConfigOptionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go index 3c6e6b87a..73f089856 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodIPApplyConfiguration represents an declarative configuration of the PodIP type for use +// PodIPApplyConfiguration represents a declarative configuration of the PodIP type for use // with apply. type PodIPApplyConfiguration struct { IP *string `json:"ip,omitempty"` } -// PodIPApplyConfiguration constructs an declarative configuration of the PodIP type for use with +// PodIPApplyConfiguration constructs a declarative configuration of the PodIP type for use with // apply. 
func PodIP() *PodIPApplyConfiguration { return &PodIPApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go index a5315d636..7f156f817 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodOSApplyConfiguration represents an declarative configuration of the PodOS type for use +// PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use // with apply. type PodOSApplyConfiguration struct { Name *v1.OSName `json:"name,omitempty"` } -// PodOSApplyConfiguration constructs an declarative configuration of the PodOS type for use with +// PodOSApplyConfiguration constructs a declarative configuration of the PodOS type for use with // apply. func PodOS() *PodOSApplyConfiguration { return &PodOSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go index 9d3ad458a..09746df1b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// PodReadinessGateApplyConfiguration represents an declarative configuration of the PodReadinessGate type for use +// PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use // with apply. type PodReadinessGateApplyConfiguration struct { ConditionType *v1.PodConditionType `json:"conditionType,omitempty"` } -// PodReadinessGateApplyConfiguration constructs an declarative configuration of the PodReadinessGate type for use with +// PodReadinessGateApplyConfiguration constructs a declarative configuration of the PodReadinessGate type for use with // apply. func PodReadinessGate() *PodReadinessGateApplyConfiguration { return &PodReadinessGateApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go new file mode 100644 index 000000000..b0bd67fa1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodResourceClaimApplyConfiguration represents a declarative configuration of the PodResourceClaim type for use +// with apply. 
+type PodResourceClaimApplyConfiguration struct { + Name *string `json:"name,omitempty"` + ResourceClaimName *string `json:"resourceClaimName,omitempty"` + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` +} + +// PodResourceClaimApplyConfiguration constructs a declarative configuration of the PodResourceClaim type for use with +// apply. +func PodResourceClaim() *PodResourceClaimApplyConfiguration { + return &PodResourceClaimApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithName(value string) *PodResourceClaimApplyConfiguration { + b.Name = &value + return b +} + +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimName field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimName = &value + return b +} + +// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimTemplateName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimTemplateName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go new file mode 100644 index 000000000..f60ad4b05 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodResourceClaimStatusApplyConfiguration represents a declarative configuration of the PodResourceClaimStatus type for use +// with apply. +type PodResourceClaimStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + ResourceClaimName *string `json:"resourceClaimName,omitempty"` +} + +// PodResourceClaimStatusApplyConfiguration constructs a declarative configuration of the PodResourceClaimStatus type for use with +// apply. 
+func PodResourceClaimStatus() *PodResourceClaimStatusApplyConfiguration { + return &PodResourceClaimStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodResourceClaimStatusApplyConfiguration) WithName(value string) *PodResourceClaimStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimName field is set to the value of the last call. +func (b *PodResourceClaimStatusApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimStatusApplyConfiguration { + b.ResourceClaimName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go new file mode 100644 index 000000000..3d9109277 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PodSchedulingGateApplyConfiguration represents a declarative configuration of the PodSchedulingGate type for use +// with apply. +type PodSchedulingGateApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PodSchedulingGateApplyConfiguration constructs a declarative configuration of the PodSchedulingGate type for use with +// apply. +func PodSchedulingGate() *PodSchedulingGateApplyConfiguration { + return &PodSchedulingGateApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *PodSchedulingGateApplyConfiguration) WithName(value string) *PodSchedulingGateApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go index 6db09aa32..55085e630 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go @@ -22,22 +22,24 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSecurityContextApplyConfiguration represents an declarative configuration of the PodSecurityContext type for use +// PodSecurityContextApplyConfiguration represents a declarative configuration of the PodSecurityContext type for use // with apply. type PodSecurityContextApplyConfiguration struct { - SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` - WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` - RunAsUser *int64 `json:"runAsUser,omitempty"` - RunAsGroup *int64 `json:"runAsGroup,omitempty"` - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` - SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` - FSGroup *int64 `json:"fsGroup,omitempty"` - Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` - FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` - SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` + WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` + RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsGroup *int64 `json:"runAsGroup,omitempty"` + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` + SupplementalGroupsPolicy *corev1.SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty"` + FSGroup *int64 `json:"fsGroup,omitempty"` + Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` + FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` + SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` } -// PodSecurityContextApplyConfiguration constructs an declarative configuration of the PodSecurityContext type for use with +// PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with // apply. func PodSecurityContext() *PodSecurityContextApplyConfiguration { return &PodSecurityContextApplyConfiguration{} @@ -93,6 +95,14 @@ func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroups(values ... return b } +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. 
+func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroupsPolicy(value corev1.SupplementalGroupsPolicy) *PodSecurityContextApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} + // WithFSGroup sets the FSGroup field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroup field is set to the value of the last call. @@ -129,3 +139,11 @@ func (b *PodSecurityContextApplyConfiguration) WithSeccompProfile(value *Seccomp b.SeccompProfile = value return b } + +// WithAppArmorProfile sets the AppArmorProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AppArmorProfile field is set to the value of the last call. +func (b *PodSecurityContextApplyConfiguration) WithAppArmorProfile(value *AppArmorProfileApplyConfiguration) *PodSecurityContextApplyConfiguration { + b.AppArmorProfile = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index 015859e9a..8134e044f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSpecApplyConfiguration represents an declarative configuration of the PodSpec type for use +// PodSpecApplyConfiguration represents a declarative configuration of the PodSpec type for use // with apply. type PodSpecApplyConfiguration struct { Volumes []VolumeApplyConfiguration `json:"volumes,omitempty"` @@ -61,9 +61,12 @@ type PodSpecApplyConfiguration struct { TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"` SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"` OS *PodOSApplyConfiguration `json:"os,omitempty"` + HostUsers *bool `json:"hostUsers,omitempty"` + SchedulingGates []PodSchedulingGateApplyConfiguration `json:"schedulingGates,omitempty"` + ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"` } -// PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with +// PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with // apply. func PodSpec() *PodSpecApplyConfiguration { return &PodSpecApplyConfiguration{} @@ -407,3 +410,37 @@ func (b *PodSpecApplyConfiguration) WithOS(value *PodOSApplyConfiguration) *PodS b.OS = value return b } + +// WithHostUsers sets the HostUsers field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostUsers field is set to the value of the last call. +func (b *PodSpecApplyConfiguration) WithHostUsers(value bool) *PodSpecApplyConfiguration { + b.HostUsers = &value + return b +} + +// WithSchedulingGates adds the given value to the SchedulingGates field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SchedulingGates field. 
+func (b *PodSpecApplyConfiguration) WithSchedulingGates(values ...*PodSchedulingGateApplyConfiguration) *PodSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSchedulingGates") + } + b.SchedulingGates = append(b.SchedulingGates, *values[i]) + } + return b +} + +// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceClaims field. +func (b *PodSpecApplyConfiguration) WithResourceClaims(values ...*PodResourceClaimApplyConfiguration) *PodSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceClaims") + } + b.ResourceClaims = append(b.ResourceClaims, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 7ee5b9955..0b68996cd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -23,25 +23,28 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodStatusApplyConfiguration represents an declarative configuration of the PodStatus type for use +// PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use // with apply. type PodStatusApplyConfiguration struct { - Phase *v1.PodPhase `json:"phase,omitempty"` - Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"` - Message *string `json:"message,omitempty"` - Reason *string `json:"reason,omitempty"` - NominatedNodeName *string `json:"nominatedNodeName,omitempty"` - HostIP *string `json:"hostIP,omitempty"` - PodIP *string `json:"podIP,omitempty"` - PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"` - StartTime *metav1.Time `json:"startTime,omitempty"` - InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"` - ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` - QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` - EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` -} - -// PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with + Phase *v1.PodPhase `json:"phase,omitempty"` + Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"` + Message *string `json:"message,omitempty"` + Reason *string `json:"reason,omitempty"` + NominatedNodeName *string `json:"nominatedNodeName,omitempty"` + HostIP *string `json:"hostIP,omitempty"` + HostIPs []HostIPApplyConfiguration `json:"hostIPs,omitempty"` + PodIP *string `json:"podIP,omitempty"` + PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"` + ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` + QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` + EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` + Resize *v1.PodResizeStatus `json:"resize,omitempty"` + ResourceClaimStatuses 
[]PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"` +} + +// PodStatusApplyConfiguration constructs a declarative configuration of the PodStatus type for use with // apply. func PodStatus() *PodStatusApplyConfiguration { return &PodStatusApplyConfiguration{} @@ -100,6 +103,19 @@ func (b *PodStatusApplyConfiguration) WithHostIP(value string) *PodStatusApplyCo return b } +// WithHostIPs adds the given value to the HostIPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HostIPs field. +func (b *PodStatusApplyConfiguration) WithHostIPs(values ...*HostIPApplyConfiguration) *PodStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHostIPs") + } + b.HostIPs = append(b.HostIPs, *values[i]) + } + return b +} + // WithPodIP sets the PodIP field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodIP field is set to the value of the last call. @@ -175,3 +191,24 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...* } return b } + +// WithResize sets the Resize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resize field is set to the value of the last call. +func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration { + b.Resize = &value + return b +} + +// WithResourceClaimStatuses adds the given value to the ResourceClaimStatuses field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceClaimStatuses field. +func (b *PodStatusApplyConfiguration) WithResourceClaimStatuses(values ...*PodResourceClaimStatusApplyConfiguration) *PodStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceClaimStatuses") + } + b.ResourceClaimStatuses = append(b.ResourceClaimStatuses, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go index 1460977c0..b4c8a658a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateApplyConfiguration represents an declarative configuration of the PodTemplate type for use +// PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use // with apply. type PodTemplateApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type PodTemplateApplyConfiguration struct { Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// PodTemplate constructs an declarative configuration of the PodTemplate type for use with +// PodTemplate constructs a declarative configuration of the PodTemplate type for use with // apply. 
func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { b := &PodTemplateApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *PodTemplateApplyConfiguration) WithNamespace(value string) *PodTemplate return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PodTemplateApplyConfiguration) WithSelfLink(value string) *PodTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *PodTemplateApplyConfiguration) WithFinalizers(values ...string) *PodTem return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PodTemplateApplyConfiguration) WithClusterName(value string) *PodTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PodTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *PodTemplateApplyConfiguration) WithTemplate(value *PodTemplateSpecApply b.Template = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go index ff06ea4b3..6146c01c7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go @@ -24,14 +24,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateSpecApplyConfiguration represents an declarative configuration of the PodTemplateSpec type for use +// PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use // with apply. type PodTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` } -// PodTemplateSpecApplyConfiguration constructs an declarative configuration of the PodTemplateSpec type for use with +// PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with // apply. 
func PodTemplateSpec() *PodTemplateSpecApplyConfiguration { return &PodTemplateSpecApplyConfiguration{} @@ -64,15 +64,6 @@ func (b *PodTemplateSpecApplyConfiguration) WithNamespace(value string) *PodTemp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PodTemplateSpecApplyConfiguration) WithSelfLink(value string) *PodTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -182,15 +173,6 @@ func (b *PodTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *Po return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PodTemplateSpecApplyConfiguration) WithClusterName(value string) *PodTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PodTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -204,3 +186,9 @@ func (b *PodTemplateSpecApplyConfiguration) WithSpec(value *PodSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go index 8c70c8f6c..5e738cabd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// PortStatusApplyConfiguration represents an declarative configuration of the PortStatus type for use +// PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use // with apply. type PortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type PortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// PortStatusApplyConfiguration constructs an declarative configuration of the PortStatus type for use with +// PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with // apply. 
func PortStatus() *PortStatusApplyConfiguration { return &PortStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go index 19cbb82ed..29715e021 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PortworxVolumeSourceApplyConfiguration represents an declarative configuration of the PortworxVolumeSource type for use +// PortworxVolumeSourceApplyConfiguration represents a declarative configuration of the PortworxVolumeSource type for use // with apply. type PortworxVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -26,7 +26,7 @@ type PortworxVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// PortworxVolumeSourceApplyConfiguration constructs an declarative configuration of the PortworxVolumeSource type for use with +// PortworxVolumeSourceApplyConfiguration constructs a declarative configuration of the PortworxVolumeSource type for use with // apply. func PortworxVolumeSource() *PortworxVolumeSourceApplyConfiguration { return &PortworxVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go index a373e4afe..b88a3646f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PreferredSchedulingTermApplyConfiguration represents an declarative configuration of the PreferredSchedulingTerm type for use +// PreferredSchedulingTermApplyConfiguration represents a declarative configuration of the PreferredSchedulingTerm type for use // with apply. type PreferredSchedulingTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` Preference *NodeSelectorTermApplyConfiguration `json:"preference,omitempty"` } -// PreferredSchedulingTermApplyConfiguration constructs an declarative configuration of the PreferredSchedulingTerm type for use with +// PreferredSchedulingTermApplyConfiguration constructs a declarative configuration of the PreferredSchedulingTerm type for use with // apply. func PreferredSchedulingTerm() *PreferredSchedulingTermApplyConfiguration { return &PreferredSchedulingTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go index 10730557a..3be1c9650 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeApplyConfiguration represents an declarative configuration of the Probe type for use +// ProbeApplyConfiguration represents a declarative configuration of the Probe type for use // with apply. 
type ProbeApplyConfiguration struct { ProbeHandlerApplyConfiguration `json:",inline"` @@ -30,7 +30,7 @@ type ProbeApplyConfiguration struct { TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } -// ProbeApplyConfiguration constructs an declarative configuration of the Probe type for use with +// ProbeApplyConfiguration constructs a declarative configuration of the Probe type for use with // apply. func Probe() *ProbeApplyConfiguration { return &ProbeApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go index 54f3344ac..1f88745ea 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeHandlerApplyConfiguration represents an declarative configuration of the ProbeHandler type for use +// ProbeHandlerApplyConfiguration represents a declarative configuration of the ProbeHandler type for use // with apply. type ProbeHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` @@ -27,7 +27,7 @@ type ProbeHandlerApplyConfiguration struct { GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"` } -// ProbeHandlerApplyConfiguration constructs an declarative configuration of the ProbeHandler type for use with +// ProbeHandlerApplyConfiguration constructs a declarative configuration of the ProbeHandler type for use with // apply. func ProbeHandler() *ProbeHandlerApplyConfiguration { return &ProbeHandlerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go index 0a9d1d88e..c922ec8cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ProjectedVolumeSourceApplyConfiguration represents an declarative configuration of the ProjectedVolumeSource type for use +// ProjectedVolumeSourceApplyConfiguration represents a declarative configuration of the ProjectedVolumeSource type for use // with apply. type ProjectedVolumeSourceApplyConfiguration struct { Sources []VolumeProjectionApplyConfiguration `json:"sources,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// ProjectedVolumeSourceApplyConfiguration constructs an declarative configuration of the ProjectedVolumeSource type for use with +// ProjectedVolumeSourceApplyConfiguration constructs a declarative configuration of the ProjectedVolumeSource type for use with // apply. func ProjectedVolumeSource() *ProjectedVolumeSourceApplyConfiguration { return &ProjectedVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go index 646052ea4..9a042a0a1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// QuobyteVolumeSourceApplyConfiguration represents an declarative configuration of the QuobyteVolumeSource type for use +// QuobyteVolumeSourceApplyConfiguration represents a declarative configuration of the QuobyteVolumeSource type for use // with apply. type QuobyteVolumeSourceApplyConfiguration struct { Registry *string `json:"registry,omitempty"` @@ -29,7 +29,7 @@ type QuobyteVolumeSourceApplyConfiguration struct { Tenant *string `json:"tenant,omitempty"` } -// QuobyteVolumeSourceApplyConfiguration constructs an declarative configuration of the QuobyteVolumeSource type for use with +// QuobyteVolumeSourceApplyConfiguration constructs a declarative configuration of the QuobyteVolumeSource type for use with // apply. func QuobyteVolumeSource() *QuobyteVolumeSourceApplyConfiguration { return &QuobyteVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go index ffcb836eb..64f25724a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the RBDPersistentVolumeSource type for use +// RBDPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the RBDPersistentVolumeSource type for use // with apply. type RBDPersistentVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDPersistentVolumeSource type for use with +// RBDPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDPersistentVolumeSource type for use with // apply. func RBDPersistentVolumeSource() *RBDPersistentVolumeSourceApplyConfiguration { return &RBDPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go index 8e7c81732..8dae198c0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDVolumeSourceApplyConfiguration represents an declarative configuration of the RBDVolumeSource type for use +// RBDVolumeSourceApplyConfiguration represents a declarative configuration of the RBDVolumeSource type for use // with apply. type RBDVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDVolumeSource type for use with +// RBDVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDVolumeSource type for use with // apply. 
func RBDVolumeSource() *RBDVolumeSourceApplyConfiguration { return &RBDVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go index 6dd6ae267..b28f422dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicationControllerApplyConfiguration represents an declarative configuration of the ReplicationController type for use +// ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use // with apply. type ReplicationControllerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicationControllerApplyConfiguration struct { Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicationController constructs an declarative configuration of the ReplicationController type for use with +// ReplicationController constructs a declarative configuration of the ReplicationController type for use with // apply. func ReplicationController(name, namespace string) *ReplicationControllerApplyConfiguration { b := &ReplicationControllerApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *ReplicationControllerApplyConfiguration) WithNamespace(value string) *R return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ReplicationControllerApplyConfiguration) WithSelfLink(value string) *ReplicationControllerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *ReplicationControllerApplyConfiguration) WithFinalizers(values ...strin return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ReplicationControllerApplyConfiguration) WithClusterName(value string) *ReplicationControllerApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ReplicationControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *ReplicationControllerApplyConfiguration) WithStatus(value *ReplicationC b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ReplicationControllerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go index c3d56cc69..0d74c1db9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicationControllerConditionApplyConfiguration represents an declarative configuration of the ReplicationControllerCondition type for use +// ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use // with apply. type ReplicationControllerConditionApplyConfiguration struct { Type *v1.ReplicationControllerConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type ReplicationControllerConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicationControllerConditionApplyConfiguration constructs an declarative configuration of the ReplicationControllerCondition type for use with +// ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with // apply. func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfiguration { return &ReplicationControllerConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go index dd4e081d9..07bac9f4c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicationControllerSpecApplyConfiguration represents an declarative configuration of the ReplicationControllerSpec type for use +// ReplicationControllerSpecApplyConfiguration represents a declarative configuration of the ReplicationControllerSpec type for use // with apply. type ReplicationControllerSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -27,7 +27,7 @@ type ReplicationControllerSpecApplyConfiguration struct { Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicationControllerSpecApplyConfiguration constructs an declarative configuration of the ReplicationControllerSpec type for use with +// ReplicationControllerSpecApplyConfiguration constructs a declarative configuration of the ReplicationControllerSpec type for use with // apply. func ReplicationControllerSpec() *ReplicationControllerSpecApplyConfiguration { return &ReplicationControllerSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go index 1b994cfb8..c8046aa5a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// ReplicationControllerStatusApplyConfiguration represents an declarative configuration of the ReplicationControllerStatus type for use +// ReplicationControllerStatusApplyConfiguration represents a declarative configuration of the ReplicationControllerStatus type for use // with apply. type ReplicationControllerStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicationControllerStatusApplyConfiguration struct { Conditions []ReplicationControllerConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicationControllerStatusApplyConfiguration constructs an declarative configuration of the ReplicationControllerStatus type for use with +// ReplicationControllerStatusApplyConfiguration constructs a declarative configuration of the ReplicationControllerStatus type for use with // apply. func ReplicationControllerStatus() *ReplicationControllerStatusApplyConfiguration { return &ReplicationControllerStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go new file mode 100644 index 000000000..b00c69248 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use +// with apply. +type ResourceClaimApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Request *string `json:"request,omitempty"` +} + +// ResourceClaimApplyConfiguration constructs a declarative configuration of the ResourceClaim type for use with +// apply. +func ResourceClaim() *ResourceClaimApplyConfiguration { + return &ResourceClaimApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { + b.Name = &value + return b +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithRequest(value string) *ResourceClaimApplyConfiguration { + b.Request = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go index 2741227dd..1b4918a63 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceFieldSelectorApplyConfiguration represents an declarative configuration of the ResourceFieldSelector type for use +// ResourceFieldSelectorApplyConfiguration represents a declarative configuration of the ResourceFieldSelector type for use // with apply. type ResourceFieldSelectorApplyConfiguration struct { ContainerName *string `json:"containerName,omitempty"` @@ -30,7 +30,7 @@ type ResourceFieldSelectorApplyConfiguration struct { Divisor *resource.Quantity `json:"divisor,omitempty"` } -// ResourceFieldSelectorApplyConfiguration constructs an declarative configuration of the ResourceFieldSelector type for use with +// ResourceFieldSelectorApplyConfiguration constructs a declarative configuration of the ResourceFieldSelector type for use with // apply. func ResourceFieldSelector() *ResourceFieldSelectorApplyConfiguration { return &ResourceFieldSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go new file mode 100644 index 000000000..5169cb4bc --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use +// with apply. +type ResourceHealthApplyConfiguration struct { + ResourceID *v1.ResourceID `json:"resourceID,omitempty"` + Health *v1.ResourceHealthStatus `json:"health,omitempty"` +} + +// ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with +// apply. +func ResourceHealth() *ResourceHealthApplyConfiguration { + return &ResourceHealthApplyConfiguration{} +} + +// WithResourceID sets the ResourceID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceID field is set to the value of the last call. 
+func (b *ResourceHealthApplyConfiguration) WithResourceID(value v1.ResourceID) *ResourceHealthApplyConfiguration { + b.ResourceID = &value + return b +} + +// WithHealth sets the Health field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Health field is set to the value of the last call. +func (b *ResourceHealthApplyConfiguration) WithHealth(value v1.ResourceHealthStatus) *ResourceHealthApplyConfiguration { + b.Health = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go index 5cfb1988b..2b78ba703 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceQuotaApplyConfiguration represents an declarative configuration of the ResourceQuota type for use +// ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use // with apply. type ResourceQuotaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ResourceQuotaApplyConfiguration struct { Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` } -// ResourceQuota constructs an declarative configuration of the ResourceQuota type for use with +// ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with // apply. func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { b := &ResourceQuotaApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *ResourceQuotaApplyConfiguration) WithNamespace(value string) *ResourceQ return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ResourceQuotaApplyConfiguration) WithSelfLink(value string) *ResourceQuotaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *ResourceQuotaApplyConfiguration) WithFinalizers(values ...string) *Reso return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *ResourceQuotaApplyConfiguration) WithClusterName(value string) *ResourceQuotaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ResourceQuotaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *ResourceQuotaApplyConfiguration) WithStatus(value *ResourceQuotaStatusA b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceQuotaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go index feb454bc4..0012ace25 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceQuotaSpecApplyConfiguration represents an declarative configuration of the ResourceQuotaSpec type for use +// ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use // with apply. type ResourceQuotaSpecApplyConfiguration struct { Hard *v1.ResourceList `json:"hard,omitempty"` @@ -30,7 +30,7 @@ type ResourceQuotaSpecApplyConfiguration struct { ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"` } -// ResourceQuotaSpecApplyConfiguration constructs an declarative configuration of the ResourceQuotaSpec type for use with +// ResourceQuotaSpecApplyConfiguration constructs a declarative configuration of the ResourceQuotaSpec type for use with // apply. func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration { return &ResourceQuotaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go index 4dced90f7..364b96eec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceQuotaStatusApplyConfiguration represents an declarative configuration of the ResourceQuotaStatus type for use +// ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use // with apply. type ResourceQuotaStatusApplyConfiguration struct { Hard *v1.ResourceList `json:"hard,omitempty"` Used *v1.ResourceList `json:"used,omitempty"` } -// ResourceQuotaStatusApplyConfiguration constructs an declarative configuration of the ResourceQuotaStatus type for use with +// ResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatus type for use with // apply. 
func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration { return &ResourceQuotaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go index d22f38479..51197862c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go @@ -22,14 +22,15 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceRequirementsApplyConfiguration represents an declarative configuration of the ResourceRequirements type for use +// ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use // with apply. type ResourceRequirementsApplyConfiguration struct { - Limits *v1.ResourceList `json:"limits,omitempty"` - Requests *v1.ResourceList `json:"requests,omitempty"` + Limits *v1.ResourceList `json:"limits,omitempty"` + Requests *v1.ResourceList `json:"requests,omitempty"` + Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"` } -// ResourceRequirementsApplyConfiguration constructs an declarative configuration of the ResourceRequirements type for use with +// ResourceRequirementsApplyConfiguration constructs a declarative configuration of the ResourceRequirements type for use with // apply. func ResourceRequirements() *ResourceRequirementsApplyConfiguration { return &ResourceRequirementsApplyConfiguration{} @@ -50,3 +51,16 @@ func (b *ResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceL b.Requests = &value return b } + +// WithClaims adds the given value to the Claims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Claims field. +func (b *ResourceRequirementsApplyConfiguration) WithClaims(values ...*ResourceClaimApplyConfiguration) *ResourceRequirementsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithClaims") + } + b.Claims = append(b.Claims, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go new file mode 100644 index 000000000..1e63c87f8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use +// with apply. 
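// Reviewer note (aside, not part of the vendored files): resourcerequirements.go above gains a
// Claims slice wired to the new ResourceClaim apply configuration (Name/Request) added earlier
// in this diff. A minimal sketch of the combined builder, assuming the aliases below; the claim
// and request names are illustrative.

package main // illustrative sketch, not repository code

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func resourcesSketch() *corev1ac.ResourceRequirementsApplyConfiguration {
	return corev1ac.ResourceRequirements().
		WithLimits(corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("500m")}).
		WithClaims(corev1ac.ResourceClaim().
			WithName("gpu").        // refers to a pod-level resourceClaims entry
			WithRequest("gpu-req")) // Request field is new in this client-go version
}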
+type ResourceStatusApplyConfiguration struct { + Name *v1.ResourceName `json:"name,omitempty"` + Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"` +} + +// ResourceStatusApplyConfiguration constructs a declarative configuration of the ResourceStatus type for use with +// apply. +func ResourceStatus() *ResourceStatusApplyConfiguration { + return &ResourceStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceStatusApplyConfiguration) WithName(value v1.ResourceName) *ResourceStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *ResourceStatusApplyConfiguration) WithResources(values ...*ResourceHealthApplyConfiguration) *ResourceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go index fffb5b186..b07f46de9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOPersistentVolumeSource type for use +// ScaleIOPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOPersistentVolumeSource type for use // with apply. type ScaleIOPersistentVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOPersistentVolumeSource type for use with +// ScaleIOPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOPersistentVolumeSource type for use with // apply. func ScaleIOPersistentVolumeSource() *ScaleIOPersistentVolumeSourceApplyConfiguration { return &ScaleIOPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go index b54e1161e..740c05ebb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOVolumeSource type for use +// ScaleIOVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOVolumeSource type for use // with apply. 
type ScaleIOVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOVolumeSource type for use with +// ScaleIOVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOVolumeSource type for use with // apply. func ScaleIOVolumeSource() *ScaleIOVolumeSourceApplyConfiguration { return &ScaleIOVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go index c901a2ae6..c6ec87827 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ScopedResourceSelectorRequirementApplyConfiguration represents an declarative configuration of the ScopedResourceSelectorRequirement type for use +// ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use // with apply. type ScopedResourceSelectorRequirementApplyConfiguration struct { ScopeName *v1.ResourceQuotaScope `json:"scopeName,omitempty"` @@ -30,7 +30,7 @@ type ScopedResourceSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// ScopedResourceSelectorRequirementApplyConfiguration constructs an declarative configuration of the ScopedResourceSelectorRequirement type for use with +// ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with // apply. func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApplyConfiguration { return &ScopedResourceSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go index 3251e9dc1..a9fb9a1b1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScopeSelectorApplyConfiguration represents an declarative configuration of the ScopeSelector type for use +// ScopeSelectorApplyConfiguration represents a declarative configuration of the ScopeSelector type for use // with apply. type ScopeSelectorApplyConfiguration struct { MatchExpressions []ScopedResourceSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// ScopeSelectorApplyConfiguration constructs an declarative configuration of the ScopeSelector type for use with +// ScopeSelectorApplyConfiguration constructs a declarative configuration of the ScopeSelector type for use with // apply. 
func ScopeSelector() *ScopeSelectorApplyConfiguration { return &ScopeSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go index 9818a00e7..eb3077a05 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// SeccompProfileApplyConfiguration represents an declarative configuration of the SeccompProfile type for use +// SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use // with apply. type SeccompProfileApplyConfiguration struct { Type *v1.SeccompProfileType `json:"type,omitempty"` LocalhostProfile *string `json:"localhostProfile,omitempty"` } -// SeccompProfileApplyConfiguration constructs an declarative configuration of the SeccompProfile type for use with +// SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with // apply. func SeccompProfile() *SeccompProfileApplyConfiguration { return &SeccompProfileApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go index 00d789f41..1d850b00b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// SecretApplyConfiguration represents an declarative configuration of the Secret type for use +// SecretApplyConfiguration represents a declarative configuration of the Secret type for use // with apply. type SecretApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -38,7 +38,7 @@ type SecretApplyConfiguration struct { Type *corev1.SecretType `json:"type,omitempty"` } -// Secret constructs an declarative configuration of the Secret type for use with +// Secret constructs a declarative configuration of the Secret type for use with // apply. func Secret(name, namespace string) *SecretApplyConfiguration { b := &SecretApplyConfiguration{} @@ -128,15 +128,6 @@ func (b *SecretApplyConfiguration) WithNamespace(value string) *SecretApplyConfi return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *SecretApplyConfiguration) WithSelfLink(value string) *SecretApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -246,15 +237,6 @@ func (b *SecretApplyConfiguration) WithFinalizers(values ...string) *SecretApply return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *SecretApplyConfiguration) WithClusterName(value string) *SecretApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *SecretApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -304,3 +286,9 @@ func (b *SecretApplyConfiguration) WithType(value corev1.SecretType) *SecretAppl b.Type = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *SecretApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go index 7b22a8d0b..ba99b7f5f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretEnvSourceApplyConfiguration represents an declarative configuration of the SecretEnvSource type for use +// SecretEnvSourceApplyConfiguration represents a declarative configuration of the SecretEnvSource type for use // with apply. type SecretEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// SecretEnvSourceApplyConfiguration constructs an declarative configuration of the SecretEnvSource type for use with +// SecretEnvSourceApplyConfiguration constructs a declarative configuration of the SecretEnvSource type for use with // apply. func SecretEnvSource() *SecretEnvSourceApplyConfiguration { return &SecretEnvSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go index b8464a348..2d490b810 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretKeySelectorApplyConfiguration represents an declarative configuration of the SecretKeySelector type for use +// SecretKeySelectorApplyConfiguration represents a declarative configuration of the SecretKeySelector type for use // with apply. type SecretKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretKeySelectorApplyConfiguration constructs an declarative configuration of the SecretKeySelector type for use with +// SecretKeySelectorApplyConfiguration constructs a declarative configuration of the SecretKeySelector type for use with // apply. func SecretKeySelector() *SecretKeySelectorApplyConfiguration { return &SecretKeySelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go index e8edc6127..65ce3c66d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// SecretProjectionApplyConfiguration represents an declarative configuration of the SecretProjection type for use +// SecretProjectionApplyConfiguration represents a declarative configuration of the SecretProjection type for use // with apply. type SecretProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretProjectionApplyConfiguration constructs an declarative configuration of the SecretProjection type for use with +// SecretProjectionApplyConfiguration constructs a declarative configuration of the SecretProjection type for use with // apply. func SecretProjection() *SecretProjectionApplyConfiguration { return &SecretProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go index 95579d003..f5e0de23a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretReferenceApplyConfiguration represents an declarative configuration of the SecretReference type for use +// SecretReferenceApplyConfiguration represents a declarative configuration of the SecretReference type for use // with apply. type SecretReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` Namespace *string `json:"namespace,omitempty"` } -// SecretReferenceApplyConfiguration constructs an declarative configuration of the SecretReference type for use with +// SecretReferenceApplyConfiguration constructs a declarative configuration of the SecretReference type for use with // apply. func SecretReference() *SecretReferenceApplyConfiguration { return &SecretReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go index bcb441e9f..9f765d354 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretVolumeSourceApplyConfiguration represents an declarative configuration of the SecretVolumeSource type for use +// SecretVolumeSourceApplyConfiguration represents a declarative configuration of the SecretVolumeSource type for use // with apply. type SecretVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type SecretVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretVolumeSourceApplyConfiguration constructs an declarative configuration of the SecretVolumeSource type for use with +// SecretVolumeSourceApplyConfiguration constructs a declarative configuration of the SecretVolumeSource type for use with // apply. 
func SecretVolumeSource() *SecretVolumeSourceApplyConfiguration { return &SecretVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go index 8f01537eb..99faab72d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// SecurityContextApplyConfiguration represents an declarative configuration of the SecurityContext type for use +// SecurityContextApplyConfiguration represents a declarative configuration of the SecurityContext type for use // with apply. type SecurityContextApplyConfiguration struct { Capabilities *CapabilitiesApplyConfiguration `json:"capabilities,omitempty"` @@ -36,9 +36,10 @@ type SecurityContextApplyConfiguration struct { AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` ProcMount *corev1.ProcMountType `json:"procMount,omitempty"` SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` } -// SecurityContextApplyConfiguration constructs an declarative configuration of the SecurityContext type for use with +// SecurityContextApplyConfiguration constructs a declarative configuration of the SecurityContext type for use with // apply. func SecurityContext() *SecurityContextApplyConfiguration { return &SecurityContextApplyConfiguration{} @@ -131,3 +132,11 @@ func (b *SecurityContextApplyConfiguration) WithSeccompProfile(value *SeccompPro b.SeccompProfile = value return b } + +// WithAppArmorProfile sets the AppArmorProfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AppArmorProfile field is set to the value of the last call. +func (b *SecurityContextApplyConfiguration) WithAppArmorProfile(value *AppArmorProfileApplyConfiguration) *SecurityContextApplyConfiguration { + b.AppArmorProfile = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go index 2938faa18..bad01300f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SELinuxOptionsApplyConfiguration represents an declarative configuration of the SELinuxOptions type for use +// SELinuxOptionsApplyConfiguration represents a declarative configuration of the SELinuxOptions type for use // with apply. type SELinuxOptionsApplyConfiguration struct { User *string `json:"user,omitempty"` @@ -27,7 +27,7 @@ type SELinuxOptionsApplyConfiguration struct { Level *string `json:"level,omitempty"` } -// SELinuxOptionsApplyConfiguration constructs an declarative configuration of the SELinuxOptions type for use with +// SELinuxOptionsApplyConfiguration constructs a declarative configuration of the SELinuxOptions type for use with // apply. 
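// Reviewer note (aside, not part of the vendored files): the securitycontext.go hunk above adds
// an AppArmorProfile field alongside SeccompProfile. A minimal sketch, assuming the
// AppArmorProfile apply configuration and the corev1 AppArmorProfileTypeRuntimeDefault constant
// shipped with this client-go version; the settings shown are illustrative only.

package main // illustrative sketch, not repository code

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func securityContextSketch() *corev1ac.SecurityContextApplyConfiguration {
	return corev1ac.SecurityContext().
		WithRunAsNonRoot(true).
		WithAppArmorProfile(corev1ac.AppArmorProfile().
			WithType(corev1.AppArmorProfileTypeRuntimeDefault))
}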
func SELinuxOptions() *SELinuxOptionsApplyConfiguration { return &SELinuxOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go index 31b47311f..2dac0589d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceApplyConfiguration represents an declarative configuration of the Service type for use +// ServiceApplyConfiguration represents a declarative configuration of the Service type for use // with apply. type ServiceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ServiceApplyConfiguration struct { Status *ServiceStatusApplyConfiguration `json:"status,omitempty"` } -// Service constructs an declarative configuration of the Service type for use with +// Service constructs a declarative configuration of the Service type for use with // apply. func Service(name, namespace string) *ServiceApplyConfiguration { b := &ServiceApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *ServiceApplyConfiguration) WithNamespace(value string) *ServiceApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ServiceApplyConfiguration) WithSelfLink(value string) *ServiceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *ServiceApplyConfiguration) WithFinalizers(values ...string) *ServiceApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ServiceApplyConfiguration) WithClusterName(value string) *ServiceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ServiceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *ServiceApplyConfiguration) WithStatus(value *ServiceStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ServiceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go index 459d025eb..26d33deb9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceAccountApplyConfiguration represents an declarative configuration of the ServiceAccount type for use +// ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use // with apply. type ServiceAccountApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ServiceAccountApplyConfiguration struct { AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` } -// ServiceAccount constructs an declarative configuration of the ServiceAccount type for use with +// ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with // apply. func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { b := &ServiceAccountApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *ServiceAccountApplyConfiguration) WithNamespace(value string) *ServiceA return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ServiceAccountApplyConfiguration) WithSelfLink(value string) *ServiceAccountApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *ServiceAccountApplyConfiguration) WithFinalizers(values ...string) *Ser return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ServiceAccountApplyConfiguration) WithClusterName(value string) *ServiceAccountApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ServiceAccountApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -293,3 +275,9 @@ func (b *ServiceAccountApplyConfiguration) WithAutomountServiceAccountToken(valu b.AutomountServiceAccountToken = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ServiceAccountApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go index a52fad7d8..fab81bf8a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceAccountTokenProjectionApplyConfiguration represents an declarative configuration of the ServiceAccountTokenProjection type for use +// ServiceAccountTokenProjectionApplyConfiguration represents a declarative configuration of the ServiceAccountTokenProjection type for use // with apply. type ServiceAccountTokenProjectionApplyConfiguration struct { Audience *string `json:"audience,omitempty"` @@ -26,7 +26,7 @@ type ServiceAccountTokenProjectionApplyConfiguration struct { Path *string `json:"path,omitempty"` } -// ServiceAccountTokenProjectionApplyConfiguration constructs an declarative configuration of the ServiceAccountTokenProjection type for use with +// ServiceAccountTokenProjectionApplyConfiguration constructs a declarative configuration of the ServiceAccountTokenProjection type for use with // apply. func ServiceAccountTokenProjection() *ServiceAccountTokenProjectionApplyConfiguration { return &ServiceAccountTokenProjectionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go index 8bc63bd95..e889f2134 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// ServicePortApplyConfiguration represents an declarative configuration of the ServicePort type for use +// ServicePortApplyConfiguration represents a declarative configuration of the ServicePort type for use // with apply. type ServicePortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -34,7 +34,7 @@ type ServicePortApplyConfiguration struct { NodePort *int32 `json:"nodePort,omitempty"` } -// ServicePortApplyConfiguration constructs an declarative configuration of the ServicePort type for use with +// ServicePortApplyConfiguration constructs a declarative configuration of the ServicePort type for use with // apply. func ServicePort() *ServicePortApplyConfiguration { return &ServicePortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index 856cd4f9d..41367dce4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ServiceSpecApplyConfiguration represents an declarative configuration of the ServiceSpec type for use +// ServiceSpecApplyConfiguration represents a declarative configuration of the ServiceSpec type for use // with apply. 
type ServiceSpecApplyConfiguration struct { Ports []ServicePortApplyConfiguration `json:"ports,omitempty"` @@ -35,18 +35,19 @@ type ServiceSpecApplyConfiguration struct { LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"` ExternalName *string `json:"externalName,omitempty"` - ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"` + ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"` HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"` PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"` SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"` IPFamilies []corev1.IPFamily `json:"ipFamilies,omitempty"` - IPFamilyPolicy *corev1.IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty"` + IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"` AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"` LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` - InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty"` + InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"` + TrafficDistribution *string `json:"trafficDistribution,omitempty"` } -// ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with +// ServiceSpecApplyConfiguration constructs a declarative configuration of the ServiceSpec type for use with // apply. func ServiceSpec() *ServiceSpecApplyConfiguration { return &ServiceSpecApplyConfiguration{} @@ -152,7 +153,7 @@ func (b *ServiceSpecApplyConfiguration) WithExternalName(value string) *ServiceS // WithExternalTrafficPolicy sets the ExternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ExternalTrafficPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithExternalTrafficPolicy(value corev1.ServiceExternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.ExternalTrafficPolicy = &value return b } @@ -194,7 +195,7 @@ func (b *ServiceSpecApplyConfiguration) WithIPFamilies(values ...corev1.IPFamily // WithIPFamilyPolicy sets the IPFamilyPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the IPFamilyPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithIPFamilyPolicy(value corev1.IPFamilyPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithIPFamilyPolicy(value corev1.IPFamilyPolicy) *ServiceSpecApplyConfiguration { b.IPFamilyPolicy = &value return b } @@ -218,7 +219,15 @@ func (b *ServiceSpecApplyConfiguration) WithLoadBalancerClass(value string) *Ser // WithInternalTrafficPolicy sets the InternalTrafficPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the InternalTrafficPolicy field is set to the value of the last call. -func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicyType) *ServiceSpecApplyConfiguration { +func (b *ServiceSpecApplyConfiguration) WithInternalTrafficPolicy(value corev1.ServiceInternalTrafficPolicy) *ServiceSpecApplyConfiguration { b.InternalTrafficPolicy = &value return b } + +// WithTrafficDistribution sets the TrafficDistribution field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TrafficDistribution field is set to the value of the last call. +func (b *ServiceSpecApplyConfiguration) WithTrafficDistribution(value string) *ServiceSpecApplyConfiguration { + b.TrafficDistribution = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go index 2347cec67..11c3f8a80 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go @@ -22,14 +22,14 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceStatusApplyConfiguration represents an declarative configuration of the ServiceStatus type for use +// ServiceStatusApplyConfiguration represents a declarative configuration of the ServiceStatus type for use // with apply. type ServiceStatusApplyConfiguration struct { LoadBalancer *LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ServiceStatusApplyConfiguration constructs an declarative configuration of the ServiceStatus type for use with +// ServiceStatusApplyConfiguration constructs a declarative configuration of the ServiceStatus type for use with // apply. func ServiceStatus() *ServiceStatusApplyConfiguration { return &ServiceStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go index 7016f836a..13b045fff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// SessionAffinityConfigApplyConfiguration represents an declarative configuration of the SessionAffinityConfig type for use +// SessionAffinityConfigApplyConfiguration represents a declarative configuration of the SessionAffinityConfig type for use // with apply. type SessionAffinityConfigApplyConfiguration struct { ClientIP *ClientIPConfigApplyConfiguration `json:"clientIP,omitempty"` } -// SessionAffinityConfigApplyConfiguration constructs an declarative configuration of the SessionAffinityConfig type for use with +// SessionAffinityConfigApplyConfiguration constructs a declarative configuration of the SessionAffinityConfig type for use with // apply. 
func SessionAffinityConfig() *SessionAffinityConfigApplyConfiguration { return &SessionAffinityConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go new file mode 100644 index 000000000..b4115609b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SleepActionApplyConfiguration represents a declarative configuration of the SleepAction type for use +// with apply. +type SleepActionApplyConfiguration struct { + Seconds *int64 `json:"seconds,omitempty"` +} + +// SleepActionApplyConfiguration constructs a declarative configuration of the SleepAction type for use with +// apply. +func SleepAction() *SleepActionApplyConfiguration { + return &SleepActionApplyConfiguration{} +} + +// WithSeconds sets the Seconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Seconds field is set to the value of the last call. +func (b *SleepActionApplyConfiguration) WithSeconds(value int64) *SleepActionApplyConfiguration { + b.Seconds = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go index 00ed39ccb..7381a498e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StorageOSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSPersistentVolumeSource type for use +// StorageOSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSPersistentVolumeSource type for use // with apply. type StorageOSPersistentVolumeSourceApplyConfiguration struct { VolumeName *string `json:"volumeName,omitempty"` @@ -28,7 +28,7 @@ type StorageOSPersistentVolumeSourceApplyConfiguration struct { SecretRef *ObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// StorageOSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSPersistentVolumeSource type for use with +// StorageOSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSPersistentVolumeSource type for use with // apply. 
func StorageOSPersistentVolumeSource() *StorageOSPersistentVolumeSourceApplyConfiguration { return &StorageOSPersistentVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go index 7f3b810cf..81d9373c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StorageOSVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSVolumeSource type for use +// StorageOSVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSVolumeSource type for use // with apply. type StorageOSVolumeSourceApplyConfiguration struct { VolumeName *string `json:"volumeName,omitempty"` @@ -28,7 +28,7 @@ type StorageOSVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// StorageOSVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSVolumeSource type for use with +// StorageOSVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSVolumeSource type for use with // apply. func StorageOSVolumeSource() *StorageOSVolumeSourceApplyConfiguration { return &StorageOSVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go index deab9e0b3..7719eb7d6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SysctlApplyConfiguration represents an declarative configuration of the Sysctl type for use +// SysctlApplyConfiguration represents a declarative configuration of the Sysctl type for use // with apply. type SysctlApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// SysctlApplyConfiguration constructs an declarative configuration of the Sysctl type for use with +// SysctlApplyConfiguration constructs a declarative configuration of the Sysctl type for use with // apply. func Sysctl() *SysctlApplyConfiguration { return &SysctlApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go index 4672b8742..a34fb0552 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TaintApplyConfiguration represents an declarative configuration of the Taint type for use +// TaintApplyConfiguration represents a declarative configuration of the Taint type for use // with apply. type TaintApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -32,7 +32,7 @@ type TaintApplyConfiguration struct { TimeAdded *metav1.Time `json:"timeAdded,omitempty"` } -// TaintApplyConfiguration constructs an declarative configuration of the Taint type for use with +// TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with // apply. 
func Taint() *TaintApplyConfiguration { return &TaintApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go index bd038fc3a..cba1a7d08 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// TCPSocketActionApplyConfiguration represents an declarative configuration of the TCPSocketAction type for use +// TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use // with apply. type TCPSocketActionApplyConfiguration struct { Port *intstr.IntOrString `json:"port,omitempty"` Host *string `json:"host,omitempty"` } -// TCPSocketActionApplyConfiguration constructs an declarative configuration of the TCPSocketAction type for use with +// TCPSocketActionApplyConfiguration constructs a declarative configuration of the TCPSocketAction type for use with // apply. func TCPSocketAction() *TCPSocketActionApplyConfiguration { return &TCPSocketActionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go index 1a92a8c66..1bcc85b65 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// TolerationApplyConfiguration represents an declarative configuration of the Toleration type for use +// TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use // with apply. type TolerationApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -32,7 +32,7 @@ type TolerationApplyConfiguration struct { TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` } -// TolerationApplyConfiguration constructs an declarative configuration of the Toleration type for use with +// TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with // apply. func Toleration() *TolerationApplyConfiguration { return &TolerationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go index 9581490de..674ddec93 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TopologySelectorLabelRequirementApplyConfiguration represents an declarative configuration of the TopologySelectorLabelRequirement type for use +// TopologySelectorLabelRequirementApplyConfiguration represents a declarative configuration of the TopologySelectorLabelRequirement type for use // with apply. 
type TopologySelectorLabelRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` Values []string `json:"values,omitempty"` } -// TopologySelectorLabelRequirementApplyConfiguration constructs an declarative configuration of the TopologySelectorLabelRequirement type for use with +// TopologySelectorLabelRequirementApplyConfiguration constructs a declarative configuration of the TopologySelectorLabelRequirement type for use with // apply. func TopologySelectorLabelRequirement() *TopologySelectorLabelRequirementApplyConfiguration { return &TopologySelectorLabelRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go index a025b8a2a..7812ae520 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// TopologySelectorTermApplyConfiguration represents an declarative configuration of the TopologySelectorTerm type for use +// TopologySelectorTermApplyConfiguration represents a declarative configuration of the TopologySelectorTerm type for use // with apply. type TopologySelectorTermApplyConfiguration struct { MatchLabelExpressions []TopologySelectorLabelRequirementApplyConfiguration `json:"matchLabelExpressions,omitempty"` } -// TopologySelectorTermApplyConfiguration constructs an declarative configuration of the TopologySelectorTerm type for use with +// TopologySelectorTermApplyConfiguration constructs a declarative configuration of the TopologySelectorTerm type for use with // apply. func TopologySelectorTerm() *TopologySelectorTermApplyConfiguration { return &TopologySelectorTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go index ac8b82eea..b21d23351 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go @@ -23,16 +23,20 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use +// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use // with apply. 
type TopologySpreadConstraintApplyConfiguration struct { - MaxSkew *int32 `json:"maxSkew,omitempty"` - TopologyKey *string `json:"topologyKey,omitempty"` - WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` - LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + MaxSkew *int32 `json:"maxSkew,omitempty"` + TopologyKey *string `json:"topologyKey,omitempty"` + WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + MinDomains *int32 `json:"minDomains,omitempty"` + NodeAffinityPolicy *v1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"` + NodeTaintsPolicy *v1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` } -// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with +// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with // apply. func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration { return &TopologySpreadConstraintApplyConfiguration{} @@ -69,3 +73,37 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithLabelSelector(value *me b.LabelSelector = value return b } + +// WithMinDomains sets the MinDomains field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinDomains field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) *TopologySpreadConstraintApplyConfiguration { + b.MinDomains = &value + return b +} + +// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { + b.NodeAffinityPolicy = &value + return b +} + +// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call. +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { + b.NodeTaintsPolicy = &value + return b +} + +// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. 
+func (b *TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *TopologySpreadConstraintApplyConfiguration { + for i := range values { + b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go index cdc2eb7d3..1e63b7988 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// TypedLocalObjectReferenceApplyConfiguration represents an declarative configuration of the TypedLocalObjectReference type for use +// TypedLocalObjectReferenceApplyConfiguration represents a declarative configuration of the TypedLocalObjectReference type for use // with apply. type TypedLocalObjectReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type TypedLocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// TypedLocalObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedLocalObjectReference type for use with +// TypedLocalObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedLocalObjectReference type for use with // apply. func TypedLocalObjectReference() *TypedLocalObjectReferenceApplyConfiguration { return &TypedLocalObjectReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go new file mode 100644 index 000000000..f07de8902 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// TypedObjectReferenceApplyConfiguration represents a declarative configuration of the TypedObjectReference type for use +// with apply. +type TypedObjectReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Kind *string `json:"kind,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` +} + +// TypedObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedObjectReference type for use with +// apply. +func TypedObjectReference() *TypedObjectReferenceApplyConfiguration { + return &TypedObjectReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. 
+func (b *TypedObjectReferenceApplyConfiguration) WithAPIGroup(value string) *TypedObjectReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *TypedObjectReferenceApplyConfiguration) WithKind(value string) *TypedObjectReferenceApplyConfiguration { + b.Kind = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *TypedObjectReferenceApplyConfiguration) WithName(value string) *TypedObjectReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *TypedObjectReferenceApplyConfiguration) WithNamespace(value string) *TypedObjectReferenceApplyConfiguration { + b.Namespace = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go index db0686bce..9a48f8349 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VolumeApplyConfiguration represents an declarative configuration of the Volume type for use +// VolumeApplyConfiguration represents a declarative configuration of the Volume type for use // with apply. type VolumeApplyConfiguration struct { Name *string `json:"name,omitempty"` VolumeSourceApplyConfiguration `json:",inline"` } -// VolumeApplyConfiguration constructs an declarative configuration of the Volume type for use with +// VolumeApplyConfiguration constructs a declarative configuration of the Volume type for use with // apply. func Volume() *VolumeApplyConfiguration { return &VolumeApplyConfiguration{} @@ -270,3 +270,11 @@ func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApp b.Ephemeral = value return b } + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { + b.Image = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go index ea18ca8d9..0bc52aad2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// VolumeDeviceApplyConfiguration represents an declarative configuration of the VolumeDevice type for use +// VolumeDeviceApplyConfiguration represents a declarative configuration of the VolumeDevice type for use // with apply. type VolumeDeviceApplyConfiguration struct { Name *string `json:"name,omitempty"` DevicePath *string `json:"devicePath,omitempty"` } -// VolumeDeviceApplyConfiguration constructs an declarative configuration of the VolumeDevice type for use with +// VolumeDeviceApplyConfiguration constructs a declarative configuration of the VolumeDevice type for use with // apply. func VolumeDevice() *VolumeDeviceApplyConfiguration { return &VolumeDeviceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go index b0bec9ffe..49f22cc4e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go @@ -22,18 +22,19 @@ import ( v1 "k8s.io/api/core/v1" ) -// VolumeMountApplyConfiguration represents an declarative configuration of the VolumeMount type for use +// VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use // with apply. type VolumeMountApplyConfiguration struct { - Name *string `json:"name,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - MountPath *string `json:"mountPath,omitempty"` - SubPath *string `json:"subPath,omitempty"` - MountPropagation *v1.MountPropagationMode `json:"mountPropagation,omitempty"` - SubPathExpr *string `json:"subPathExpr,omitempty"` + Name *string `json:"name,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + SubPath *string `json:"subPath,omitempty"` + MountPropagation *v1.MountPropagationMode `json:"mountPropagation,omitempty"` + SubPathExpr *string `json:"subPathExpr,omitempty"` } -// VolumeMountApplyConfiguration constructs an declarative configuration of the VolumeMount type for use with +// VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with // apply. func VolumeMount() *VolumeMountApplyConfiguration { return &VolumeMountApplyConfiguration{} @@ -55,6 +56,14 @@ func (b *VolumeMountApplyConfiguration) WithReadOnly(value bool) *VolumeMountApp return b } +// WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RecursiveReadOnly field is set to the value of the last call. +func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration { + b.RecursiveReadOnly = &value + return b +} + // WithMountPath sets the MountPath field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MountPath field is set to the value of the last call. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go new file mode 100644 index 000000000..a0a9b5401 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use +// with apply. +type VolumeMountStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` +} + +// VolumeMountStatusApplyConfiguration constructs a declarative configuration of the VolumeMountStatus type for use with +// apply. +func VolumeMountStatus() *VolumeMountStatusApplyConfiguration { + return &VolumeMountStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VolumeMountStatusApplyConfiguration) WithName(value string) *VolumeMountStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithMountPath sets the MountPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountPath field is set to the value of the last call. +func (b *VolumeMountStatusApplyConfiguration) WithMountPath(value string) *VolumeMountStatusApplyConfiguration { + b.MountPath = &value + return b +} + +// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadOnly field is set to the value of the last call. +func (b *VolumeMountStatusApplyConfiguration) WithReadOnly(value bool) *VolumeMountStatusApplyConfiguration { + b.ReadOnly = &value + return b +} + +// WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RecursiveReadOnly field is set to the value of the last call. 
+func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration { + b.RecursiveReadOnly = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go index 32bfd8292..9198c25dc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// VolumeNodeAffinityApplyConfiguration represents an declarative configuration of the VolumeNodeAffinity type for use +// VolumeNodeAffinityApplyConfiguration represents a declarative configuration of the VolumeNodeAffinity type for use // with apply. type VolumeNodeAffinityApplyConfiguration struct { Required *NodeSelectorApplyConfiguration `json:"required,omitempty"` } -// VolumeNodeAffinityApplyConfiguration constructs an declarative configuration of the VolumeNodeAffinity type for use with +// VolumeNodeAffinityApplyConfiguration constructs a declarative configuration of the VolumeNodeAffinity type for use with // apply. func VolumeNodeAffinity() *VolumeNodeAffinityApplyConfiguration { return &VolumeNodeAffinityApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go index 8d16ea79e..c14e9fe69 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go @@ -18,16 +18,17 @@ limitations under the License. package v1 -// VolumeProjectionApplyConfiguration represents an declarative configuration of the VolumeProjection type for use +// VolumeProjectionApplyConfiguration represents a declarative configuration of the VolumeProjection type for use // with apply. type VolumeProjectionApplyConfiguration struct { Secret *SecretProjectionApplyConfiguration `json:"secret,omitempty"` DownwardAPI *DownwardAPIProjectionApplyConfiguration `json:"downwardAPI,omitempty"` ConfigMap *ConfigMapProjectionApplyConfiguration `json:"configMap,omitempty"` ServiceAccountToken *ServiceAccountTokenProjectionApplyConfiguration `json:"serviceAccountToken,omitempty"` + ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"` } -// VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with +// VolumeProjectionApplyConfiguration constructs a declarative configuration of the VolumeProjection type for use with // apply. func VolumeProjection() *VolumeProjectionApplyConfiguration { return &VolumeProjectionApplyConfiguration{} @@ -64,3 +65,11 @@ func (b *VolumeProjectionApplyConfiguration) WithServiceAccountToken(value *Serv b.ServiceAccountToken = value return b } + +// WithClusterTrustBundle sets the ClusterTrustBundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterTrustBundle field is set to the value of the last call. 
+func (b *VolumeProjectionApplyConfiguration) WithClusterTrustBundle(value *ClusterTrustBundleProjectionApplyConfiguration) *VolumeProjectionApplyConfiguration { + b.ClusterTrustBundle = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go new file mode 100644 index 000000000..ae849f774 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use +// with apply. +type VolumeResourceRequirementsApplyConfiguration struct { + Limits *v1.ResourceList `json:"limits,omitempty"` + Requests *v1.ResourceList `json:"requests,omitempty"` +} + +// VolumeResourceRequirementsApplyConfiguration constructs a declarative configuration of the VolumeResourceRequirements type for use with +// apply. +func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration { + return &VolumeResourceRequirementsApplyConfiguration{} +} + +// WithLimits sets the Limits field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limits field is set to the value of the last call. +func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { + b.Limits = &value + return b +} + +// WithRequests sets the Requests field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Requests field is set to the value of the last call. +func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { + b.Requests = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go index 4a8d316dd..aeead953c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeSourceApplyConfiguration represents an declarative configuration of the VolumeSource type for use +// VolumeSourceApplyConfiguration represents a declarative configuration of the VolumeSource type for use // with apply. 
type VolumeSourceApplyConfiguration struct { HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"` @@ -50,9 +50,10 @@ type VolumeSourceApplyConfiguration struct { StorageOS *StorageOSVolumeSourceApplyConfiguration `json:"storageos,omitempty"` CSI *CSIVolumeSourceApplyConfiguration `json:"csi,omitempty"` Ephemeral *EphemeralVolumeSourceApplyConfiguration `json:"ephemeral,omitempty"` + Image *ImageVolumeSourceApplyConfiguration `json:"image,omitempty"` } -// VolumeSourceApplyConfiguration constructs an declarative configuration of the VolumeSource type for use with +// VolumeSourceApplyConfiguration constructs a declarative configuration of the VolumeSource type for use with // apply. func VolumeSource() *VolumeSourceApplyConfiguration { return &VolumeSourceApplyConfiguration{} @@ -289,3 +290,11 @@ func (b *VolumeSourceApplyConfiguration) WithEphemeral(value *EphemeralVolumeSou b.Ephemeral = value return b } + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeSourceApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeSourceApplyConfiguration { + b.Image = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go index ff3e3e27d..ea8fd8d62 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VsphereVirtualDiskVolumeSourceApplyConfiguration represents an declarative configuration of the VsphereVirtualDiskVolumeSource type for use +// VsphereVirtualDiskVolumeSourceApplyConfiguration represents a declarative configuration of the VsphereVirtualDiskVolumeSource type for use // with apply. type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { VolumePath *string `json:"volumePath,omitempty"` @@ -27,7 +27,7 @@ type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { StoragePolicyID *string `json:"storagePolicyID,omitempty"` } -// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the VsphereVirtualDiskVolumeSource type for use with +// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the VsphereVirtualDiskVolumeSource type for use with // apply. func VsphereVirtualDiskVolumeSource() *VsphereVirtualDiskVolumeSourceApplyConfiguration { return &VsphereVirtualDiskVolumeSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go index eb99d06ff..c49ef93eb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// WeightedPodAffinityTermApplyConfiguration represents an declarative configuration of the WeightedPodAffinityTerm type for use +// WeightedPodAffinityTermApplyConfiguration represents a declarative configuration of the WeightedPodAffinityTerm type for use // with apply. type WeightedPodAffinityTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` PodAffinityTerm *PodAffinityTermApplyConfiguration `json:"podAffinityTerm,omitempty"` } -// WeightedPodAffinityTermApplyConfiguration constructs an declarative configuration of the WeightedPodAffinityTerm type for use with +// WeightedPodAffinityTermApplyConfiguration constructs a declarative configuration of the WeightedPodAffinityTerm type for use with // apply. func WeightedPodAffinityTerm() *WeightedPodAffinityTermApplyConfiguration { return &WeightedPodAffinityTermApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go index 20692e014..bb37a500b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WindowsSecurityContextOptionsApplyConfiguration represents an declarative configuration of the WindowsSecurityContextOptions type for use +// WindowsSecurityContextOptionsApplyConfiguration represents a declarative configuration of the WindowsSecurityContextOptions type for use // with apply. type WindowsSecurityContextOptionsApplyConfiguration struct { GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"` @@ -27,7 +27,7 @@ type WindowsSecurityContextOptionsApplyConfiguration struct { HostProcess *bool `json:"hostProcess,omitempty"` } -// WindowsSecurityContextOptionsApplyConfiguration constructs an declarative configuration of the WindowsSecurityContextOptions type for use with +// WindowsSecurityContextOptionsApplyConfiguration constructs a declarative configuration of the WindowsSecurityContextOptions type for use with // apply. func WindowsSecurityContextOptions() *WindowsSecurityContextOptionsApplyConfiguration { return &WindowsSecurityContextOptionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go index d8c2359a3..df45a6fb8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -35,7 +35,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. 
func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go index 68c25dd57..20f0b9712 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go index 6eb9f21a5..d2d0f6776 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go index c71295600..12908deb6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. 
func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go index 6feaab25d..97002d2bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithSelfLink(value string) *EndpointSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *Endp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithClusterName(value string) *EndpointSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -293,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go index 192a5ad2e..505d11ae2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go index 724c2d007..5d87dae72 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -34,7 +34,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go index bc0438f90..13f5fa557 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. 
func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go index 41d80206b..99f69027a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go index 9a3a31b96..07cfc684b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go index bacc1134d..888319bc0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type EndpointSliceApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. 
func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithSelfLink(value string) *EndpointSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *Endp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithClusterName(value string) *EndpointSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -293,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go index 4d1455ed3..4af09cc49 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go +++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. 
func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go index 19cc9e0ad..a6e98d1c8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -49,7 +49,7 @@ type EventApplyConfiguration struct { DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -139,15 +139,6 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EventApplyConfiguration) WithSelfLink(value string) *EventApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -257,15 +248,6 @@ func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *EventApplyConfiguration) WithClusterName(value string) *EventApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -383,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go index e66fb4127..18069c0d1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go index f02bdd2b9..890d95748 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -49,7 +49,7 @@ type EventApplyConfiguration struct { DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -139,15 +139,6 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EventApplyConfiguration) WithSelfLink(value string) *EventApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -257,15 +248,6 @@ func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *EventApplyConfiguration) WithClusterName(value string) *EventApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -383,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go index 640a26517..75d936e8b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go +++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. 
-func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go index 145587325..ff778529c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *DaemonSetApplyConfiguration) WithSelfLink(value string) *DaemonSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSe return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *DaemonSetApplyConfiguration) WithClusterName(value string) *DaemonSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *DaemonSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go index bbf718f0f..9b8057e69 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { Type *v1beta1.DaemonSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go index b5d7a0c16..d62896918 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. 
func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go index be6b3b285..373f9ef97 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go index 2c827e62d..e597b15a6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { Type *v1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go index e64e4ca38..6badc64d8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. 
type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithSelfLink(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *Deploym return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithClusterName(value string) *DeploymentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *DeploymentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go index d8a214b7f..79e109a77 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
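Aside, not part of the vendored diff: a minimal sketch of the GetName accessor this update adds to the generated builders, shown here on the extensions/v1beta1 Deployment apply configuration. GetName lazily initializes ObjectMeta and returns the Name pointer, so it is nil until a name has been set; the name and namespace below are illustrative.

package main

import (
	"fmt"

	extensionsv1beta1ac "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
	// Deployment(name, namespace) pre-populates ObjectMeta, so GetName returns
	// a non-nil pointer to the configured name.
	d := extensionsv1beta1ac.Deployment("example-app", "default")
	if name := d.GetName(); name != nil {
		fmt.Println("apply configuration targets:", *name)
	}
}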
type DeploymentConditionApplyConfiguration struct { Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` @@ -35,7 +35,7 @@ type DeploymentConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go index 5e18476bd..5531c756f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go index f8d1cf5d2..adc023a34 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. 
func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go index 7c17b4072..2d88406eb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index c7434a6af..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. -func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. 
-func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go deleted file mode 100644 index 7c7968813..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. 
-func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go index 361605d8c..3826e0ddd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/extensions/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go index 3137bc5eb..124545223 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go deleted file mode 100644 index af46f7658..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go index df4fbc2cd..6738bf07b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithSelfLink(value string) *IngressApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -244,15 +235,6 @@ func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithClusterName(value string) *IngressApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go index f19c2f2ee..9d386f160 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go new file mode 100644 index 000000000..12dbc3596 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use +// with apply. +type IngressLoadBalancerIngressApplyConfiguration struct { + IP *string `json:"ip,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` +} + +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with +// apply. +func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { + return &IngressLoadBalancerIngressApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithIP(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.IP = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithHostname(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.Hostname = &value + return b +} + +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithPorts(values ...*IngressPortStatusApplyConfiguration) *IngressLoadBalancerIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go new file mode 100644 index 000000000..e896ab341 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use +// with apply. 
+type IngressLoadBalancerStatusApplyConfiguration struct { + Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with +// apply. +func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { + return &IngressLoadBalancerStatusApplyConfiguration{} +} + +// WithIngress adds the given value to the Ingress field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ingress field. +func (b *IngressLoadBalancerStatusApplyConfiguration) WithIngress(values ...*IngressLoadBalancerIngressApplyConfiguration) *IngressLoadBalancerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIngress") + } + b.Ingress = append(b.Ingress, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go new file mode 100644 index 000000000..4ee3f0161 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use +// with apply. +type IngressPortStatusApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Protocol *v1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` +} + +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with +// apply. +func IngressPortStatus() *IngressPortStatusApplyConfiguration { + return &IngressPortStatusApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPortStatusApplyConfiguration { + b.Port = &value + return b +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. 
+func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { + b.Protocol = &value + return b +} + +// WithError sets the Error field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Error field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithError(value string) *IngressPortStatusApplyConfiguration { + b.Error = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go index 015541eeb..dc676f7b6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go index 2d03c7b13..4a6412475 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go index 1ab4d8bb7..58fbde8b3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go index 941769594..3aed61688 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go @@ -18,17 +18,13 @@ limitations under the License. package v1beta1 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { - LoadBalancer *v1.LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` + LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} @@ -37,7 +33,7 @@ func IngressStatus() *IngressStatusApplyConfiguration { // WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LoadBalancer field is set to the value of the last call. -func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *v1.LoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { +func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *IngressLoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { b.LoadBalancer = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go index 8ca93a0bc..63648cd46 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. 
type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go index a90d3b220..4a671130b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index 965292516..fb1f95a6d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct { Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithSelfLink(value string) *NetworkPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *Netw return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithClusterName(value string) *NetworkPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *NetworkPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go index 6335ec375..ca3e174f9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go index 2ecc4c8c6..160713720 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. 
type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go index c69b28122..8a0fa5741 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct { IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go index 0140d771b..6bc1c1977 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { Protocol *v1.Protocol `json:"protocol,omitempty"` @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct { EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. 
func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go index 179e4bd02..4454329c5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. type NetworkPolicySpecApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct { PolicyTypes []extensionsv1beta1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index de3949dc9..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. 
-type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. -func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go index b57cefc9d..24c6b6ad1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithSelfLink(value string) *ReplicaSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -244,15 +235,6 @@ func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *Replica return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithClusterName(value string) *ReplicaSetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ReplicaSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go index b71736517..21a25ae81 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. type ReplicaSetConditionApplyConfiguration struct { Type *v1beta1.ReplicaSetConditionType `json:"type,omitempty"` @@ -34,7 +34,7 @@ type ReplicaSetConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go index 5d0c57014..27653dd1a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. 
type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go index 45dc4bf31..9a5b468a3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go index 131e57a39..775f82eef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. 
func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go index 3aa5e2f89..4352f7fac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go index dde5f064b..244701a5e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index 75e76e85f..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index 712c1675a..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. 
-type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. -func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce61..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. -type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go index 701145825..101aa055b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go +++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -34,10 +34,13 @@ type ScaleApplyConfiguration struct { Status *v1beta1.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { - return &ScaleApplyConfiguration{} + b := &ScaleApplyConfiguration{} + b.WithKind("Scale") + b.WithAPIVersion("extensions/v1beta1") + return b } // WithKind sets the Kind field in the declarative configuration to the given value @@ -83,15 +86,6 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -201,15 +195,6 @@ func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyCo return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -231,3 +216,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleAp b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index 265906a73..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. -func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. 
-func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index ec4313812..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..4e5805f39 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. +func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go similarity index 71% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go index 507f8e9ab..0f3b61af9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. 
type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1alpha1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1alpha1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go similarity index 86% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go index 2a76cf32e..9e3978af5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,13 +36,13 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} b.WithName(name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b } @@ -57,27 +57,27 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "") } // ExtractFlowSchemaStatus is the same as ExtractFlowSchema except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchemaStatus(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "status") } -func extractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { +func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} - err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b, subresource) + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(flowSchema.Name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b, nil } @@ -124,15 +124,6 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithSelfLink(value string) *FlowSchemaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSch return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithClusterName(value string) *FlowSchemaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go new file mode 100644 index 000000000..5f26a66d2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use +// with apply. +type FlowSchemaConditionApplyConfiguration struct { + Type *v1.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with +// apply. +func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { + return &FlowSchemaConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *FlowSchemaConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Reason field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithReason(value string) *FlowSchemaConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *FlowSchemaConditionApplyConfiguration) WithMessage(value string) *FlowSchemaConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go index fd5fc0ae9..4efd5d287 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go @@ -16,9 +16,9 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go similarity index 85% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go index db2dacf13..6f951967e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. 
type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go similarity index 83% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go index 0421f3f59..0be9eddfd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go similarity index 52% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go index df3fecbd7..8e2764298 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go @@ -16,26 +16,28 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. 
type LimitedPriorityLevelConfigurationApplyConfiguration struct { - AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} } -// WithAssuredConcurrencyShares sets the AssuredConcurrencyShares field in the declarative configuration to the given value +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AssuredConcurrencyShares field is set to the value of the last call. -func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithAssuredConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { - b.AssuredConcurrencyShares = &value +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value return b } @@ -46,3 +48,19 @@ func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse( b.LimitResponse = value return b } + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go similarity index 77% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go index 5edaa025c..454ed8beb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go @@ -16,20 +16,20 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1alpha1.LimitResponseType `json:"type,omitempty"` + Type *v1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *LimitResponseApplyConfiguration) WithType(value v1alpha1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value v1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go similarity index 87% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go index b1f09f530..29c26b340 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. 
type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go similarity index 91% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go index 841104064..088afdc58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go @@ -16,9 +16,9 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go similarity index 86% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go index 4f36afe53..bcce2679c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1alpha1 +package v1 import ( - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,13 +36,13 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} b.WithName(name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b } @@ -57,27 +57,27 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") } // ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") } -func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b, subresource) + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(priorityLevelConfiguration.Name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") return b, nil } @@ -124,15 +124,6 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithSelfLink(value string) *PriorityLevelConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ... return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithClusterName(value string) *PriorityLevelConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go new file mode 100644 index 000000000..42ccbfbf9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use +// with apply. +type PriorityLevelConfigurationConditionApplyConfiguration struct { + Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with +// apply. +func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { + return &PriorityLevelConfigurationConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithReason(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithMessage(value string) *PriorityLevelConfigurationConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go similarity index 88% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go index b477c04df..f445713f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. 
func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go similarity index 67% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go index 3949dee46..2262dedca 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go @@ -16,20 +16,21 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1alpha1.PriorityLevelEnablement `json:"type,omitempty"` + Type *v1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} @@ -38,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1alpha1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } @@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li b.Limited = value return b } + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go similarity index 90% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go index eb3ef3d61..ff650bc3d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go similarity index 89% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go index 0fccc3f08..7488f9bbe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go @@ -16,9 +16,9 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go similarity index 93% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go index d2c6f4eed..7428582a8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go @@ -16,9 +16,9 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go similarity index 86% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go index 270b5225e..58ad10764 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. 
func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go similarity index 86% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go index 83c09d644..1ec77ae89 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go @@ -16,22 +16,22 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1alpha1.SubjectKind `json:"kind,omitempty"` + Kind *v1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1alpha1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value v1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go similarity index 84% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go index a762c249e..fd90067d4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. 
func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..45ccc5cb7 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. +func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. 
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go index 6dc1bb4d6..29a8999b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go index 794ff25a7..09bd25890 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithSelfLink(value string) *FlowSchemaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -242,15 +233,6 @@ func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSch return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithClusterName(value string) *FlowSchemaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go index b62e9a22f..d1c3dbec6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta1.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go index 8d72c2d0d..1d6e8fc58 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. 
type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go index 6bc6d0543..5ad8a432b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go index 95b416e42..cc274fe2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go index f7f77f9b2..0fe5feca1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go @@ -18,14 +18,16 @@ limitations under the License. 
package v1beta1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} @@ -46,3 +48,19 @@ func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse( b.LimitResponse = value return b } + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go index 86e1bef6b..66f327601 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. 
func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go index 594ebc991..3c571ccb0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go index ea5b266b4..32a082dc7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. 
func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go index 57d1cd397..c4243f874 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithSelfLink(value string) *PriorityLevelConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ... return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithClusterName(value string) *PriorityLevelConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go index 59bc61051..1ad4a554b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go index c44bcc08b..b5e773e82 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. 
func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go index 8ed4e399f..b013845f4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go @@ -22,14 +22,15 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} @@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li b.Limited = value return b } + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go index 3c27e6aa6..875b01efe 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. 
type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go index 5e6e6e7b0..85a8b8863 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go index 2e12ee1cc..5c67dad75 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. 
func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go index f5a146a9b..439e5ff75 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go index af571029f..b5c231f6d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *v1beta1.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go index 35bf27a59..bc2deae4c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. 
type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..0c02d9b38 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta2 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. +func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. 
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go index 924f966d4..e3c4b97a7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go @@ -22,13 +22,13 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go index 323d7241d..ffc3af950 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithSelfLink(value string) *FlowSchemaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -242,15 +233,6 @@ func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSch return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithClusterName(value string) *FlowSchemaApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go index 04dfcbf11..44571d263 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type FlowSchemaConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go index a5477e276..6eab63bfa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. 
type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go index 67c5be2cb..70ac997e4 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go index b670f2cfd..25207d7c1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go index e25f7f6b0..298dd4637 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go @@ -18,14 +18,16 @@ limitations under the License. 
package v1beta2 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} @@ -46,3 +48,19 @@ func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse( b.LimitResponse = value return b } + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go index a9b7661fb..38a513d30 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go @@ -22,14 +22,14 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { Type *v1beta2.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. 
func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go index cb8ba0afd..5032ee489 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go index 179c3979d..2bb8c8718 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. 
func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go index 4ac11bba6..7d52ca2c2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithSelfLink(value string) *PriorityLevelConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ... return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithClusterName(value string) *PriorityLevelConfigurationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go index f742adeff..ddb17e984 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. type PriorityLevelConfigurationConditionApplyConfiguration struct { Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` @@ -33,7 +33,7 @@ type PriorityLevelConfigurationConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go index 581b451ff..bbf718b60 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. 
func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go index 5560ed9e5..c083ad0ba 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -22,14 +22,15 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} @@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li b.Limited = value return b } + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go index b55e32be0..7a1f8790b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. 
type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go index 06246fb27..19c34c5f8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go index b67ea1c7f..070d2ed46 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. 
func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go index b6cfdcad3..c0d44721c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go index 7030785b8..2cfaab43d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go @@ -22,7 +22,7 @@ import ( v1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *v1beta2.SubjectKind `json:"kind,omitempty"` @@ -31,7 +31,7 @@ type SubjectApplyConfiguration struct { ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go index 8c77b3e8a..c249f042d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. 
type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go new file mode 100644 index 000000000..b9bf6993a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use +// with apply. +type ExemptPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` +} + +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// apply. +func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { + return &ExemptPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. 
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go new file mode 100644 index 000000000..49d84bd86 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use +// with apply. +type FlowDistinguisherMethodApplyConfiguration struct { + Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` +} + +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with +// apply. +func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { + return &FlowDistinguisherMethodApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go similarity index 62% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go index cceec69f9..1f69c43b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1beta1 +package v1beta3 import ( - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,64 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. -type PodSecurityPolicyApplyConfiguration struct { +type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` + Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func FlowSchema(name string) *FlowSchemaApplyConfiguration { + b := &FlowSchemaApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractFlowSchema extracts the applied configuration owned by fieldManager from +// flowSchema. If no managedFields are found in flowSchema for fieldManager, a +// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API. +// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { + return extractFlowSchema(flowSchema, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { + b := &FlowSchemaApplyConfiguration{} + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.FlowSchema"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(flowSchema.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("extensions/v1beta1") + b.WithKind("FlowSchema") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { b.Kind = &value return b } @@ -91,7 +92,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +100,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +109,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,25 +118,16 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSelfLink(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -144,7 +136,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -153,7 +145,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -162,7 +154,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -171,7 +163,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -180,7 +172,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -190,7 +182,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -205,7 +197,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -219,7 +211,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -233,7 +225,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -241,16 +233,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithClusterName(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -259,7 +242,21 @@ func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfiguration // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithSpec(value *FlowSchemaSpecApplyConfiguration) *FlowSchemaApplyConfiguration { b.Spec = value return b } + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyConfiguration) *FlowSchemaApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go similarity index 77% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go index 31f5dc13e..41d623aeb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1beta3 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. 
type FlowSchemaConditionApplyConfiguration struct { - Type *v1alpha1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1beta3.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1alpha1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1alpha1.FlowSche // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go new file mode 100644 index 000000000..7141f6a6a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use +// with apply. 
+type FlowSchemaSpecApplyConfiguration struct { + PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` + MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"` + DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"` + Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` +} + +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with +// apply. +func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { + return &FlowSchemaSpecApplyConfiguration{} +} + +// WithPriorityLevelConfiguration sets the PriorityLevelConfiguration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PriorityLevelConfiguration field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithPriorityLevelConfiguration(value *PriorityLevelConfigurationReferenceApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.PriorityLevelConfiguration = value + return b +} + +// WithMatchingPrecedence sets the MatchingPrecedence field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchingPrecedence field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithMatchingPrecedence(value int32) *FlowSchemaSpecApplyConfiguration { + b.MatchingPrecedence = &value + return b +} + +// WithDistinguisherMethod sets the DistinguisherMethod field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DistinguisherMethod field is set to the value of the last call. +func (b *FlowSchemaSpecApplyConfiguration) WithDistinguisherMethod(value *FlowDistinguisherMethodApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + b.DistinguisherMethod = value + return b +} + +// WithRules adds the given value to the Rules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Rules field. +func (b *FlowSchemaSpecApplyConfiguration) WithRules(values ...*PolicyRulesWithSubjectsApplyConfiguration) *FlowSchemaSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRules") + } + b.Rules = append(b.Rules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go new file mode 100644 index 000000000..294ddc909 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use +// with apply. +type FlowSchemaStatusApplyConfiguration struct { + Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with +// apply. +func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { + return &FlowSchemaStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *FlowSchemaStatusApplyConfiguration) WithConditions(values ...*FlowSchemaConditionApplyConfiguration) *FlowSchemaStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go similarity index 65% rename from vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go index 27b49bf15..6576e716e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/allowedcsidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1beta3 -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. -type AllowedCSIDriverApplyConfiguration struct { +type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} +func GroupSubject() *GroupSubjectApplyConfiguration { + return &GroupSubjectApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
-func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { +func (b *GroupSubjectApplyConfiguration) WithName(value string) *GroupSubjectApplyConfiguration { b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go new file mode 100644 index 000000000..bd98dd683 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use +// with apply. +type LimitedPriorityLevelConfigurationApplyConfiguration struct { + NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` + LendablePercent *int32 `json:"lendablePercent,omitempty"` + BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` +} + +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// apply. +func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { + return &LimitedPriorityLevelConfigurationApplyConfiguration{} +} + +// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.NominalConcurrencyShares = &value + return b +} + +// WithLimitResponse sets the LimitResponse field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LimitResponse field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLimitResponse(value *LimitResponseApplyConfiguration) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LimitResponse = value + return b +} + +// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LendablePercent field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.LendablePercent = &value + return b +} + +// WithBorrowingLimitPercent sets the BorrowingLimitPercent field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BorrowingLimitPercent field is set to the value of the last call. +func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithBorrowingLimitPercent(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.BorrowingLimitPercent = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go new file mode 100644 index 000000000..8deaabdeb --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use +// with apply. +type LimitResponseApplyConfiguration struct { + Type *v1beta3.LimitResponseType `json:"type,omitempty"` + Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` +} + +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with +// apply. +func LimitResponse() *LimitResponseApplyConfiguration { + return &LimitResponseApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *LimitResponseApplyConfiguration) WithType(value v1beta3.LimitResponseType) *LimitResponseApplyConfiguration { + b.Type = &value + return b +} + +// WithQueuing sets the Queuing field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queuing field is set to the value of the last call. 
+func (b *LimitResponseApplyConfiguration) WithQueuing(value *QueuingConfigurationApplyConfiguration) *LimitResponseApplyConfiguration { + b.Queuing = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go new file mode 100644 index 000000000..2dd0d2b06 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use +// with apply. +type NonResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + NonResourceURLs []string `json:"nonResourceURLs,omitempty"` +} + +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with +// apply. +func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { + return &NonResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *NonResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithNonResourceURLs adds the given value to the NonResourceURLs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceURLs field. +func (b *NonResourcePolicyRuleApplyConfiguration) WithNonResourceURLs(values ...string) *NonResourcePolicyRuleApplyConfiguration { + for i := range values { + b.NonResourceURLs = append(b.NonResourceURLs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go new file mode 100644 index 000000000..cc64dc585 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use +// with apply. +type PolicyRulesWithSubjectsApplyConfiguration struct { + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"` + NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` +} + +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with +// apply. +func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { + return &PolicyRulesWithSubjectsApplyConfiguration{} +} + +// WithSubjects adds the given value to the Subjects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Subjects field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithSubjects(values ...*SubjectApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSubjects") + } + b.Subjects = append(b.Subjects, *values[i]) + } + return b +} + +// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceRules field. +func (b *PolicyRulesWithSubjectsApplyConfiguration) WithResourceRules(values ...*ResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceRules") + } + b.ResourceRules = append(b.ResourceRules, *values[i]) + } + return b +} + +// WithNonResourceRules adds the given value to the NonResourceRules field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NonResourceRules field. 
+func (b *PolicyRulesWithSubjectsApplyConfiguration) WithNonResourceRules(values ...*NonResourcePolicyRuleApplyConfiguration) *PolicyRulesWithSubjectsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithNonResourceRules") + } + b.NonResourceRules = append(b.NonResourceRules, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go new file mode 100644 index 000000000..e7d1a3a5f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use +// with apply. +type PriorityLevelConfigurationApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` +} + +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with +// apply. +func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { + b := &PriorityLevelConfigurationApplyConfiguration{} + b.WithName(name) + b.WithKind("PriorityLevelConfiguration") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") + return b +} + +// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from +// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a +// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API. +// ExtractPriorityLevelConfiguration provides a way to perform a extract/modify-in-place/apply workflow. 
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") +} + +// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { + return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") +} + +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { + b := &PriorityLevelConfigurationApplyConfiguration{} + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(priorityLevelConfiguration.Name) + + b.WithKind("PriorityLevelConfiguration") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithSpec(value *PriorityLevelConfigurationSpecApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *PriorityLevelConfigurationStatusApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go similarity index 81% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go index bd91b80f2..8e9687bb9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1beta3 import ( - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1alpha1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1alpha1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go new file mode 100644 index 000000000..566aaa916 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use +// with apply. +type PriorityLevelConfigurationReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with +// apply. +func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { + return &PriorityLevelConfigurationReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PriorityLevelConfigurationReferenceApplyConfiguration) WithName(value string) *PriorityLevelConfigurationReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go new file mode 100644 index 000000000..9fa1112ce --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use +// with apply. +type PriorityLevelConfigurationSpecApplyConfiguration struct { + Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"` + Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` + Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` +} + +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with +// apply. +func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { + return &PriorityLevelConfigurationSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithLimited sets the Limited field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Limited field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *LimitedPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Limited = value + return b +} + +// WithExempt sets the Exempt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Exempt field is set to the value of the last call. +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration { + b.Exempt = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go new file mode 100644 index 000000000..be2436457 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use +// with apply. +type PriorityLevelConfigurationStatusApplyConfiguration struct { + Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with +// apply. +func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { + return &PriorityLevelConfigurationStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *PriorityLevelConfigurationStatusApplyConfiguration) WithConditions(values ...*PriorityLevelConfigurationConditionApplyConfiguration) *PriorityLevelConfigurationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go new file mode 100644 index 000000000..f9a3c6d1a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use +// with apply. +type QueuingConfigurationApplyConfiguration struct { + Queues *int32 `json:"queues,omitempty"` + HandSize *int32 `json:"handSize,omitempty"` + QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` +} + +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with +// apply. +func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { + return &QueuingConfigurationApplyConfiguration{} +} + +// WithQueues sets the Queues field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Queues field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithQueues(value int32) *QueuingConfigurationApplyConfiguration { + b.Queues = &value + return b +} + +// WithHandSize sets the HandSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HandSize field is set to the value of the last call. +func (b *QueuingConfigurationApplyConfiguration) WithHandSize(value int32) *QueuingConfigurationApplyConfiguration { + b.HandSize = &value + return b +} + +// WithQueueLengthLimit sets the QueueLengthLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the QueueLengthLimit field is set to the value of the last call. 
+func (b *QueuingConfigurationApplyConfiguration) WithQueueLengthLimit(value int32) *QueuingConfigurationApplyConfiguration { + b.QueueLengthLimit = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go new file mode 100644 index 000000000..e38f711db --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go @@ -0,0 +1,83 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use +// with apply. +type ResourcePolicyRuleApplyConfiguration struct { + Verbs []string `json:"verbs,omitempty"` + APIGroups []string `json:"apiGroups,omitempty"` + Resources []string `json:"resources,omitempty"` + ClusterScope *bool `json:"clusterScope,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` +} + +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with +// apply. +func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { + return &ResourcePolicyRuleApplyConfiguration{} +} + +// WithVerbs adds the given value to the Verbs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Verbs field. +func (b *ResourcePolicyRuleApplyConfiguration) WithVerbs(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Verbs = append(b.Verbs, values[i]) + } + return b +} + +// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the APIGroups field. +func (b *ResourcePolicyRuleApplyConfiguration) WithAPIGroups(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.APIGroups = append(b.APIGroups, values[i]) + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. 
+func (b *ResourcePolicyRuleApplyConfiguration) WithResources(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Resources = append(b.Resources, values[i]) + } + return b +} + +// WithClusterScope sets the ClusterScope field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ClusterScope field is set to the value of the last call. +func (b *ResourcePolicyRuleApplyConfiguration) WithClusterScope(value bool) *ResourcePolicyRuleApplyConfiguration { + b.ClusterScope = &value + return b +} + +// WithNamespaces adds the given value to the Namespaces field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Namespaces field. +func (b *ResourcePolicyRuleApplyConfiguration) WithNamespaces(values ...string) *ResourcePolicyRuleApplyConfiguration { + for i := range values { + b.Namespaces = append(b.Namespaces, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go new file mode 100644 index 000000000..a5ed40c2a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use +// with apply. +type ServiceAccountSubjectApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with +// apply. +func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { + return &ServiceAccountSubjectApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceAccountSubjectApplyConfiguration) WithNamespace(value string) *ServiceAccountSubjectApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ServiceAccountSubjectApplyConfiguration) WithName(value string) *ServiceAccountSubjectApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go new file mode 100644 index 000000000..c412b2a7a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta3 + +import ( + v1beta3 "k8s.io/api/flowcontrol/v1beta3" +) + +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use +// with apply. +type SubjectApplyConfiguration struct { + Kind *v1beta3.SubjectKind `json:"kind,omitempty"` + User *UserSubjectApplyConfiguration `json:"user,omitempty"` + Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` + ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` +} + +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with +// apply. +func Subject() *SubjectApplyConfiguration { + return &SubjectApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithKind(value v1beta3.SubjectKind) *SubjectApplyConfiguration { + b.Kind = &value + return b +} + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithUser(value *UserSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.User = value + return b +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *SubjectApplyConfiguration) WithGroup(value *GroupSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.Group = value + return b +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. 
+func (b *SubjectApplyConfiguration) WithServiceAccount(value *ServiceAccountSubjectApplyConfiguration) *SubjectApplyConfiguration { + b.ServiceAccount = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go similarity index 65% rename from vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go index 27b49bf15..7b3ec2ba8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1beta1 +package v1beta3 -// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. -type AllowedCSIDriverApplyConfiguration struct { +type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. -func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { - return &AllowedCSIDriverApplyConfiguration{} +func UserSubject() *UserSubjectApplyConfiguration { + return &UserSubjectApplyConfiguration{} } // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
-func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { +func (b *UserSubjectApplyConfiguration) WithName(value string) *UserSubjectApplyConfiguration { b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 824c5e958..43c9ae05a 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -39,6 +39,64 @@ func Parser() *typed.Parser { var parserOnce sync.Once var parser *typed.Parser var schemaYAML = typed.YAMLObject(`types: +- name: io.k8s.api.admissionregistration.v1.AuditAnnotation + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: valueExpression + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1.ExpressionWarning + map: + fields: + - name: fieldRef + type: + scalar: string + default: "" + - name: warning + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1.MatchCondition + map: + fields: + - name: expression + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1.MatchResources + map: + fields: + - name: excludeResourceRules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.NamedRuleWithOperations + elementRelationship: atomic + - name: matchPolicy + type: + scalar: string + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: resourceRules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.NamedRuleWithOperations + elementRelationship: atomic + elementRelationship: atomic - name: io.k8s.api.admissionregistration.v1.MutatingWebhook map: fields: @@ -55,6 +113,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -104,7 +170,7 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - name -- name: io.k8s.api.admissionregistration.v1.RuleWithOperations +- name: io.k8s.api.admissionregistration.v1.NamedRuleWithOperations map: fields: - name: apiGroups @@ -125,6 +191,12 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: resourceNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: resources type: list: @@ -134,65 +206,111 @@ var schemaYAML = typed.YAMLObject(`types: - name: scope type: scalar: string -- name: io.k8s.api.admissionregistration.v1.ServiceReference + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1.ParamKind + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1.ParamRef map: fields: - name: name type: scalar: string - default: "" - name: namespace type: scalar: string - default: "" - - name: path + - name: parameterNotFoundAction type: scalar: string - - name: port + - name: selector type: - scalar: numeric -- name: 
io.k8s.api.admissionregistration.v1.ValidatingWebhook + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1.RuleWithOperations map: fields: - - name: admissionReviewVersions + - name: apiGroups type: list: elementType: scalar: string elementRelationship: atomic - - name: clientConfig + - name: apiVersions type: - namedType: io.k8s.api.admissionregistration.v1.WebhookClientConfig - default: {} - - name: failurePolicy + list: + elementType: + scalar: string + elementRelationship: atomic + - name: operations type: - scalar: string - - name: matchPolicy + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resources + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: scope type: scalar: string +- name: io.k8s.api.admissionregistration.v1.ServiceReference + map: + fields: - name: name type: scalar: string default: "" - - name: namespaceSelector + - name: namespace type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: objectSelector + scalar: string + default: "" + - name: path type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: rules + scalar: string + - name: port + type: + scalar: numeric +- name: io.k8s.api.admissionregistration.v1.TypeChecking + map: + fields: + - name: expressionWarnings type: list: elementType: - namedType: io.k8s.api.admissionregistration.v1.RuleWithOperations + namedType: io.k8s.api.admissionregistration.v1.ExpressionWarning elementRelationship: atomic - - name: sideEffects +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy + map: + fields: + - name: apiVersion type: scalar: string - - name: timeoutSeconds + - name: kind type: - scalar: numeric -- name: io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec + default: {} + - name: status + type: + namedType: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus + default: {} +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding map: fields: - name: apiVersion @@ -205,27 +323,86 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: webhooks + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec + map: + fields: + - name: matchResources + type: + namedType: io.k8s.api.admissionregistration.v1.MatchResources + - name: paramRef + type: + namedType: io.k8s.api.admissionregistration.v1.ParamRef + - name: policyName + type: + scalar: string + - name: validationActions type: list: elementType: - namedType: io.k8s.api.admissionregistration.v1.ValidatingWebhook + scalar: string + elementRelationship: associative +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec + map: + fields: + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.AuditAnnotation + elementRelationship: atomic + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition 
elementRelationship: associative keys: - name -- name: io.k8s.api.admissionregistration.v1.WebhookClientConfig + - name: matchConstraints + type: + namedType: io.k8s.api.admissionregistration.v1.MatchResources + - name: paramKind + type: + namedType: io.k8s.api.admissionregistration.v1.ParamKind + - name: validations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.Validation + elementRelationship: atomic + - name: variables + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.Variable + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus map: fields: - - name: caBundle + - name: conditions type: - scalar: string - - name: service + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration type: - namedType: io.k8s.api.admissionregistration.v1.ServiceReference - - name: url + scalar: numeric + - name: typeChecking type: - scalar: string -- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook + namedType: io.k8s.api.admissionregistration.v1.TypeChecking +- name: io.k8s.api.admissionregistration.v1.ValidatingWebhook map: fields: - name: admissionReviewVersions @@ -236,11 +413,19 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: atomic - name: clientConfig type: - namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + namedType: io.k8s.api.admissionregistration.v1.WebhookClientConfig default: {} - name: failurePolicy type: scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.MatchCondition + elementRelationship: associative + keys: + - name - name: matchPolicy type: scalar: string @@ -254,14 +439,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: objectSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: reinvocationPolicy - type: - scalar: string - name: rules type: list: elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.RuleWithOperations + namedType: io.k8s.api.admissionregistration.v1.RuleWithOperations elementRelationship: atomic - name: sideEffects type: @@ -269,7 +451,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: timeoutSeconds type: scalar: numeric -- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration +- name: io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration map: fields: - name: apiVersion @@ -286,99 +468,146 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook + namedType: io.k8s.api.admissionregistration.v1.ValidatingWebhook elementRelationship: associative keys: - name -- name: io.k8s.api.admissionregistration.v1beta1.RuleWithOperations +- name: io.k8s.api.admissionregistration.v1.Validation map: fields: - - name: apiGroups - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: apiVersions + - name: expression type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: operations + scalar: string + default: "" + - name: message type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: resources + scalar: string + - name: messageExpression type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: scope + scalar: string + - name: reason 
type: scalar: string -- name: io.k8s.api.admissionregistration.v1beta1.ServiceReference +- name: io.k8s.api.admissionregistration.v1.Variable map: fields: - - name: name + - name: expression type: scalar: string default: "" - - name: namespace + - name: name type: scalar: string default: "" - - name: path + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1.WebhookClientConfig + map: + fields: + - name: caBundle type: scalar: string - - name: port + - name: service type: - scalar: numeric -- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook + namedType: io.k8s.api.admissionregistration.v1.ServiceReference + - name: url + type: + scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation map: fields: - - name: admissionReviewVersions + - name: key type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: clientConfig + scalar: string + default: "" + - name: valueExpression type: - namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig - default: {} - - name: failurePolicy + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + map: + fields: + - name: fieldRef type: scalar: string - - name: matchPolicy + default: "" + - name: warning + type: + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + map: + fields: + - name: expression type: scalar: string + default: "" - name: name type: scalar: string default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.MatchResources + map: + fields: + - name: excludeResourceRules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations + elementRelationship: atomic + - name: matchPolicy + type: + scalar: string - name: namespaceSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - name: objectSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: rules + - name: resourceRules type: list: elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.RuleWithOperations + namedType: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations elementRelationship: atomic - - name: sideEffects + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations + map: + fields: + - name: apiGroups type: - scalar: string - - name: timeoutSeconds + list: + elementType: + scalar: string + elementRelationship: atomic + - name: apiVersions type: - scalar: numeric -- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration + list: + elementType: + scalar: string + elementRelationship: atomic + - name: operations + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourceNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resources + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: scope + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ParamKind map: fields: - name: apiVersion @@ -387,46 +616,33 @@ var schemaYAML = typed.YAMLObject(`types: - name: kind type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: webhooks - type: - list: - elementType: - namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook - elementRelationship: 
associative - keys: - - name -- name: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ParamRef map: fields: - - name: caBundle + - name: name type: scalar: string - - name: service + - name: namespace type: - namedType: io.k8s.api.admissionregistration.v1beta1.ServiceReference - - name: url + scalar: string + - name: parameterNotFoundAction type: scalar: string -- name: io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.TypeChecking map: fields: - - name: apiServerID - type: - scalar: string - - name: decodableVersions + - name: expressionWarnings type: list: elementType: - scalar: string - elementRelationship: associative - - name: encodingVersion - type: - scalar: string -- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersion + namedType: io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy map: fields: - name: apiVersion @@ -441,206 +657,242 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec default: {} - name: status type: - namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus default: {} -- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding map: fields: - - name: lastTransitionTime + - name: apiVersion type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: message + scalar: string + - name: kind type: scalar: string - - name: observedGeneration + - name: metadata type: - scalar: numeric - - name: reason + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - scalar: string - default: "" - - name: status + namedType: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec + map: + fields: + - name: matchResources type: - scalar: string - default: "" - - name: type + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: paramRef + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamRef + - name: policyName type: scalar: string - default: "" -- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec - map: - elementType: - scalar: untyped - list: - elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic - map: - elementType: - namedType: __untyped_deduced_ - elementRelationship: separable -- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus + - name: validationActions + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec map: fields: - - name: commonEncodingVersion + - name: auditAnnotations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation + elementRelationship: atomic + - name: failurePolicy 
type: scalar: string - - name: conditions + - name: matchConditions type: list: elementType: - namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition elementRelationship: associative keys: - - type - - name: storageVersions + - name + - name: matchConstraints + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: paramKind + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamKind + - name: validations type: list: elementType: - namedType: io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion + namedType: io.k8s.api.admissionregistration.v1alpha1.Validation + elementRelationship: atomic + - name: variables + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.Variable elementRelationship: associative keys: - - apiServerID -- name: io.k8s.api.apps.v1.ControllerRevision + - name +- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus map: fields: - - name: apiVersion - type: - scalar: string - - name: data - type: - namedType: __untyped_atomic_ - default: {} - - name: kind - type: - scalar: string - - name: metadata + - name: conditions type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: revision + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration type: scalar: numeric - default: 0 -- name: io.k8s.api.apps.v1.DaemonSet + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.TypeChecking +- name: io.k8s.api.admissionregistration.v1alpha1.Validation map: fields: - - name: apiVersion + - name: expression type: scalar: string - - name: kind + default: "" + - name: message type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + - name: messageExpression type: - namedType: io.k8s.api.apps.v1.DaemonSetSpec - default: {} - - name: status + scalar: string + - name: reason type: - namedType: io.k8s.api.apps.v1.DaemonSetStatus - default: {} -- name: io.k8s.api.apps.v1.DaemonSetCondition + scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.Variable map: fields: - - name: lastTransitionTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: message + - name: expression type: scalar: string - - name: reason + default: "" + - name: name type: scalar: string - - name: status + default: "" +- name: io.k8s.api.admissionregistration.v1beta1.AuditAnnotation + map: + fields: + - name: key type: scalar: string default: "" - - name: type + - name: valueExpression type: scalar: string default: "" -- name: io.k8s.api.apps.v1.DaemonSetSpec +- name: io.k8s.api.admissionregistration.v1beta1.ExpressionWarning map: fields: - - name: minReadySeconds - type: - scalar: numeric - - name: revisionHistoryLimit + - name: fieldRef type: - scalar: numeric - - name: selector + scalar: string + default: "" + - name: warning type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: template + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1beta1.MatchCondition + map: + fields: + - name: expression type: - namedType: io.k8s.api.core.v1.PodTemplateSpec - default: {} - - name: updateStrategy + scalar: string + default: "" + - name: name type: - namedType: 
io.k8s.api.apps.v1.DaemonSetUpdateStrategy - default: {} -- name: io.k8s.api.apps.v1.DaemonSetStatus + scalar: string + default: "" +- name: io.k8s.api.admissionregistration.v1beta1.MatchResources map: fields: - - name: collisionCount + - name: excludeResourceRules type: - scalar: numeric - - name: conditions + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations + elementRelationship: atomic + - name: matchPolicy + type: + scalar: string + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: resourceRules type: list: elementType: - namedType: io.k8s.api.apps.v1.DaemonSetCondition - elementRelationship: associative - keys: - - type - - name: currentNumberScheduled + namedType: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations + elementRelationship: atomic + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook + map: + fields: + - name: admissionReviewVersions type: - scalar: numeric - default: 0 - - name: desiredNumberScheduled + list: + elementType: + scalar: string + elementRelationship: atomic + - name: clientConfig type: - scalar: numeric - default: 0 - - name: numberAvailable + namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + default: {} + - name: failurePolicy type: - scalar: numeric - - name: numberMisscheduled + scalar: string + - name: matchConditions type: - scalar: numeric - default: 0 - - name: numberReady + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchPolicy type: - scalar: numeric - default: 0 - - name: numberUnavailable + scalar: string + - name: name type: - scalar: numeric - - name: observedGeneration + scalar: string + default: "" + - name: namespaceSelector type: - scalar: numeric - - name: updatedNumberScheduled + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector type: - scalar: numeric -- name: io.k8s.api.apps.v1.DaemonSetUpdateStrategy - map: - fields: - - name: rollingUpdate + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: reinvocationPolicy type: - namedType: io.k8s.api.apps.v1.RollingUpdateDaemonSet - - name: type + scalar: string + - name: rules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.RuleWithOperations + elementRelationship: atomic + - name: sideEffects type: scalar: string -- name: io.k8s.api.apps.v1.Deployment + - name: timeoutSeconds + type: + scalar: numeric +- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration map: fields: - name: apiVersion @@ -653,110 +905,104 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: spec - type: - namedType: io.k8s.api.apps.v1.DeploymentSpec - default: {} - - name: status + - name: webhooks type: - namedType: io.k8s.api.apps.v1.DeploymentStatus - default: {} -- name: io.k8s.api.apps.v1.DeploymentCondition + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations map: fields: - - name: lastTransitionTime + - name: apiGroups type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time 
- default: {} - - name: lastUpdateTime + list: + elementType: + scalar: string + elementRelationship: atomic + - name: apiVersions type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: message + list: + elementType: + scalar: string + elementRelationship: atomic + - name: operations type: - scalar: string - - name: reason + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourceNames type: - scalar: string - - name: status + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resources type: - scalar: string - default: "" - - name: type + list: + elementType: + scalar: string + elementRelationship: atomic + - name: scope type: scalar: string - default: "" -- name: io.k8s.api.apps.v1.DeploymentSpec + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ParamKind map: fields: - - name: minReadySeconds + - name: apiVersion type: - scalar: numeric - - name: paused + scalar: string + - name: kind type: - scalar: boolean - - name: progressDeadlineSeconds + scalar: string + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ParamRef + map: + fields: + - name: name type: - scalar: numeric - - name: replicas + scalar: string + - name: namespace type: - scalar: numeric - - name: revisionHistoryLimit + scalar: string + - name: parameterNotFoundAction type: - scalar: numeric + scalar: string - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: strategy - type: - namedType: io.k8s.api.apps.v1.DeploymentStrategy - default: {} - - name: template - type: - namedType: io.k8s.api.core.v1.PodTemplateSpec - default: {} -- name: io.k8s.api.apps.v1.DeploymentStatus + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ServiceReference map: fields: - - name: availableReplicas - type: - scalar: numeric - - name: collisionCount - type: - scalar: numeric - - name: conditions - type: - list: - elementType: - namedType: io.k8s.api.apps.v1.DeploymentCondition - elementRelationship: associative - keys: - - type - - name: observedGeneration - type: - scalar: numeric - - name: readyReplicas + - name: name type: - scalar: numeric - - name: replicas + scalar: string + default: "" + - name: namespace type: - scalar: numeric - - name: unavailableReplicas + scalar: string + default: "" + - name: path type: - scalar: numeric - - name: updatedReplicas + scalar: string + - name: port type: scalar: numeric -- name: io.k8s.api.apps.v1.DeploymentStrategy +- name: io.k8s.api.admissionregistration.v1beta1.TypeChecking map: fields: - - name: rollingUpdate - type: - namedType: io.k8s.api.apps.v1.RollingUpdateDeployment - - name: type + - name: expressionWarnings type: - scalar: string -- name: io.k8s.api.apps.v1.ReplicaSet + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.ExpressionWarning + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy map: fields: - name: apiVersion @@ -771,101 +1017,154 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.apps.v1.ReplicaSetSpec + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec default: {} - name: status type: - namedType: io.k8s.api.apps.v1.ReplicaSetStatus + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus default: {} -- name: io.k8s.api.apps.v1.ReplicaSetCondition +- name: 
io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding map: fields: - - name: lastTransitionTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: message - type: - scalar: string - - name: reason + - name: apiVersion type: scalar: string - - name: status + - name: kind type: scalar: string - default: "" - - name: type + - name: metadata type: - scalar: string - default: "" -- name: io.k8s.api.apps.v1.ReplicaSetSpec + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec map: fields: - - name: minReadySeconds + - name: matchResources type: - scalar: numeric - - name: replicas + namedType: io.k8s.api.admissionregistration.v1beta1.MatchResources + - name: paramRef type: - scalar: numeric - - name: selector + namedType: io.k8s.api.admissionregistration.v1beta1.ParamRef + - name: policyName type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: template + scalar: string + - name: validationActions type: - namedType: io.k8s.api.core.v1.PodTemplateSpec - default: {} -- name: io.k8s.api.apps.v1.ReplicaSetStatus + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec map: fields: - - name: availableReplicas + - name: auditAnnotations type: - scalar: numeric - - name: conditions + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.AuditAnnotation + elementRelationship: atomic + - name: failurePolicy + type: + scalar: string + - name: matchConditions type: list: elementType: - namedType: io.k8s.api.apps.v1.ReplicaSetCondition + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition elementRelationship: associative keys: - - type - - name: fullyLabeledReplicas + - name + - name: matchConstraints type: - scalar: numeric - - name: observedGeneration + namedType: io.k8s.api.admissionregistration.v1beta1.MatchResources + - name: paramKind type: - scalar: numeric - - name: readyReplicas + namedType: io.k8s.api.admissionregistration.v1beta1.ParamKind + - name: validations type: - scalar: numeric - - name: replicas + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.Validation + elementRelationship: atomic + - name: variables type: - scalar: numeric - default: 0 -- name: io.k8s.api.apps.v1.RollingUpdateDaemonSet + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.Variable + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus map: fields: - - name: maxSurge + - name: conditions type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: observedGeneration type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.apps.v1.RollingUpdateDeployment + scalar: numeric + - name: typeChecking + type: + namedType: io.k8s.api.admissionregistration.v1beta1.TypeChecking +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook map: fields: - - name: maxSurge + - name: admissionReviewVersions type: - namedType: 
io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + list: + elementType: + scalar: string + elementRelationship: atomic + - name: clientConfig type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy - map: - fields: - - name: partition + namedType: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig + default: {} + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchPolicy + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: namespaceSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: objectSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: rules + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1.RuleWithOperations + elementRelationship: atomic + - name: sideEffects + type: + scalar: string + - name: timeoutSeconds type: scalar: numeric -- name: io.k8s.api.apps.v1.StatefulSet +- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration map: fields: - name: apiVersion @@ -878,142 +1177,163 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: spec - type: - namedType: io.k8s.api.apps.v1.StatefulSetSpec - default: {} - - name: status + - name: webhooks type: - namedType: io.k8s.api.apps.v1.StatefulSetStatus - default: {} -- name: io.k8s.api.apps.v1.StatefulSetCondition + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook + elementRelationship: associative + keys: + - name +- name: io.k8s.api.admissionregistration.v1beta1.Validation map: fields: - - name: lastTransitionTime + - name: expression type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} + scalar: string + default: "" - name: message type: scalar: string + - name: messageExpression + type: + scalar: string - name: reason type: scalar: string - - name: status +- name: io.k8s.api.admissionregistration.v1beta1.Variable + map: + fields: + - name: expression type: scalar: string default: "" - - name: type + - name: name type: scalar: string default: "" -- name: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig map: fields: - - name: whenDeleted + - name: caBundle type: scalar: string - - name: whenScaled + - name: service + type: + namedType: io.k8s.api.admissionregistration.v1beta1.ServiceReference + - name: url type: scalar: string -- name: io.k8s.api.apps.v1.StatefulSetSpec +- name: io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion map: fields: - - name: minReadySeconds + - name: apiServerID type: - scalar: numeric - - name: persistentVolumeClaimRetentionPolicy + scalar: string + - name: decodableVersions type: - namedType: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy - - name: podManagementPolicy + list: + elementType: + scalar: string + elementRelationship: associative + - name: encodingVersion type: scalar: string - - name: replicas - type: - scalar: numeric - - name: revisionHistoryLimit + - name: servedVersions type: - scalar: numeric - - name: selector + list: + elementType: + scalar: string + 
elementRelationship: associative +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersion + map: + fields: + - name: apiVersion type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: serviceName + scalar: string + - name: kind type: scalar: string - default: "" - - name: template + - name: metadata type: - namedType: io.k8s.api.core.v1.PodTemplateSpec + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: updateStrategy + - name: spec type: - namedType: io.k8s.api.apps.v1.StatefulSetUpdateStrategy + namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec default: {} - - name: volumeClaimTemplates + - name: status type: - list: - elementType: - namedType: io.k8s.api.core.v1.PersistentVolumeClaim - elementRelationship: atomic -- name: io.k8s.api.apps.v1.StatefulSetStatus + namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus + default: {} +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition map: fields: - - name: availableReplicas - type: - scalar: numeric - default: 0 - - name: collisionCount - type: - scalar: numeric - - name: conditions - type: - list: - elementType: - namedType: io.k8s.api.apps.v1.StatefulSetCondition - elementRelationship: associative - keys: - - type - - name: currentReplicas + - name: lastTransitionTime type: - scalar: numeric - - name: currentRevision + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message type: scalar: string - name: observedGeneration type: scalar: numeric - - name: readyReplicas - type: - scalar: numeric - - name: replicas - type: - scalar: numeric - default: 0 - - name: updateRevision + - name: reason type: scalar: string - - name: updatedReplicas - type: - scalar: numeric -- name: io.k8s.api.apps.v1.StatefulSetUpdateStrategy - map: - fields: - - name: rollingUpdate + default: "" + - name: status type: - namedType: io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy + scalar: string + default: "" - name: type type: scalar: string -- name: io.k8s.api.apps.v1beta1.ControllerRevision + default: "" +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec map: - fields: - - name: apiVersion + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus + map: + fields: + - name: commonEncodingVersion + type: + scalar: string + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition + elementRelationship: associative + keys: + - type + - name: storageVersions + type: + list: + elementType: + namedType: io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion + elementRelationship: associative + keys: + - apiServerID +- name: io.k8s.api.apps.v1.ControllerRevision + map: + fields: + - name: apiVersion type: scalar: string - name: data type: namedType: __untyped_atomic_ - default: {} - name: kind type: scalar: string @@ -1025,7 +1345,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.apps.v1beta1.Deployment +- name: io.k8s.api.apps.v1.DaemonSet map: fields: - name: apiVersion @@ -1040,23 +1360,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.apps.v1beta1.DeploymentSpec + namedType: io.k8s.api.apps.v1.DaemonSetSpec default: {} - name: status type: - 
namedType: io.k8s.api.apps.v1beta1.DeploymentStatus + namedType: io.k8s.api.apps.v1.DaemonSetStatus default: {} -- name: io.k8s.api.apps.v1beta1.DeploymentCondition +- name: io.k8s.api.apps.v1.DaemonSetCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: lastUpdateTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1071,44 +1386,29 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.apps.v1beta1.DeploymentSpec +- name: io.k8s.api.apps.v1.DaemonSetSpec map: fields: - name: minReadySeconds type: scalar: numeric - - name: paused - type: - scalar: boolean - - name: progressDeadlineSeconds - type: - scalar: numeric - - name: replicas - type: - scalar: numeric - name: revisionHistoryLimit type: scalar: numeric - - name: rollbackTo - type: - namedType: io.k8s.api.apps.v1beta1.RollbackConfig - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: strategy - type: - namedType: io.k8s.api.apps.v1beta1.DeploymentStrategy - default: {} - name: template type: namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} -- name: io.k8s.api.apps.v1beta1.DeploymentStatus + - name: updateStrategy + type: + namedType: io.k8s.api.apps.v1.DaemonSetUpdateStrategy + default: {} +- name: io.k8s.api.apps.v1.DaemonSetStatus map: fields: - - name: availableReplicas - type: - scalar: numeric - name: collisionCount type: scalar: numeric @@ -1116,56 +1416,48 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.apps.v1beta1.DeploymentCondition + namedType: io.k8s.api.apps.v1.DaemonSetCondition elementRelationship: associative keys: - type - - name: observedGeneration + - name: currentNumberScheduled type: scalar: numeric - - name: readyReplicas + default: 0 + - name: desiredNumberScheduled type: scalar: numeric - - name: replicas + default: 0 + - name: numberAvailable type: scalar: numeric - - name: unavailableReplicas + - name: numberMisscheduled type: scalar: numeric - - name: updatedReplicas + default: 0 + - name: numberReady type: scalar: numeric -- name: io.k8s.api.apps.v1beta1.DeploymentStrategy - map: - fields: - - name: rollingUpdate + default: 0 + - name: numberUnavailable type: - namedType: io.k8s.api.apps.v1beta1.RollingUpdateDeployment - - name: type + scalar: numeric + - name: observedGeneration type: - scalar: string -- name: io.k8s.api.apps.v1beta1.RollbackConfig - map: - fields: - - name: revision + scalar: numeric + - name: updatedNumberScheduled type: scalar: numeric -- name: io.k8s.api.apps.v1beta1.RollingUpdateDeployment +- name: io.k8s.api.apps.v1.DaemonSetUpdateStrategy map: fields: - - name: maxSurge - type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + - name: rollingUpdate type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy - map: - fields: - - name: partition + namedType: io.k8s.api.apps.v1.RollingUpdateDaemonSet + - name: type type: - scalar: numeric -- name: io.k8s.api.apps.v1beta1.StatefulSet + scalar: string +- name: io.k8s.api.apps.v1.Deployment map: fields: - name: apiVersion @@ -1180,19 +1472,21 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.apps.v1beta1.StatefulSetSpec + namedType: io.k8s.api.apps.v1.DeploymentSpec default: {} - name: status 
type: - namedType: io.k8s.api.apps.v1beta1.StatefulSetStatus + namedType: io.k8s.api.apps.v1.DeploymentStatus default: {} -- name: io.k8s.api.apps.v1beta1.StatefulSetCondition +- name: io.k8s.api.apps.v1.DeploymentCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: message type: scalar: string @@ -1207,27 +1501,18 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy - map: - fields: - - name: whenDeleted - type: - scalar: string - - name: whenScaled - type: - scalar: string -- name: io.k8s.api.apps.v1beta1.StatefulSetSpec +- name: io.k8s.api.apps.v1.DeploymentSpec map: fields: - name: minReadySeconds type: scalar: numeric - - name: persistentVolumeClaimRetentionPolicy + - name: paused type: - namedType: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy - - name: podManagementPolicy + scalar: boolean + - name: progressDeadlineSeconds type: - scalar: string + scalar: numeric - name: replicas type: scalar: numeric @@ -1237,31 +1522,20 @@ var schemaYAML = typed.YAMLObject(`types: - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: serviceName + - name: strategy type: - scalar: string - default: "" + namedType: io.k8s.api.apps.v1.DeploymentStrategy + default: {} - name: template type: namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} - - name: updateStrategy - type: - namedType: io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy - default: {} - - name: volumeClaimTemplates - type: - list: - elementType: - namedType: io.k8s.api.core.v1.PersistentVolumeClaim - elementRelationship: atomic -- name: io.k8s.api.apps.v1beta1.StatefulSetStatus +- name: io.k8s.api.apps.v1.DeploymentStatus map: fields: - name: availableReplicas type: scalar: numeric - default: 0 - name: collisionCount type: scalar: numeric @@ -1269,16 +1543,10 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.apps.v1beta1.StatefulSetCondition + namedType: io.k8s.api.apps.v1.DeploymentCondition elementRelationship: associative keys: - type - - name: currentReplicas - type: - scalar: numeric - - name: currentRevision - type: - scalar: string - name: observedGeneration type: scalar: numeric @@ -1288,44 +1556,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: replicas type: scalar: numeric - default: 0 - - name: updateRevision + - name: unavailableReplicas type: - scalar: string + scalar: numeric - name: updatedReplicas type: scalar: numeric -- name: io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy +- name: io.k8s.api.apps.v1.DeploymentStrategy map: fields: - name: rollingUpdate type: - namedType: io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy + namedType: io.k8s.api.apps.v1.RollingUpdateDeployment - name: type type: scalar: string -- name: io.k8s.api.apps.v1beta2.ControllerRevision - map: - fields: - - name: apiVersion - type: - scalar: string - - name: data - type: - namedType: __untyped_atomic_ - default: {} - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: revision - type: - scalar: numeric - default: 0 -- name: io.k8s.api.apps.v1beta2.DaemonSet +- name: io.k8s.api.apps.v1.ReplicaSet map: fields: - name: apiVersion @@ -1340,19 +1586,18 @@ 
var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.apps.v1beta2.DaemonSetSpec + namedType: io.k8s.api.apps.v1.ReplicaSetSpec default: {} - name: status type: - namedType: io.k8s.api.apps.v1beta2.DaemonSetStatus + namedType: io.k8s.api.apps.v1.ReplicaSetStatus default: {} -- name: io.k8s.api.apps.v1beta2.DaemonSetCondition +- name: io.k8s.api.apps.v1.ReplicaSetCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1367,13 +1612,13 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.apps.v1beta2.DaemonSetSpec +- name: io.k8s.api.apps.v1.ReplicaSetSpec map: fields: - name: minReadySeconds type: scalar: numeric - - name: revisionHistoryLimit + - name: replicas type: scalar: numeric - name: selector @@ -1383,62 +1628,61 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} - - name: updateStrategy - type: - namedType: io.k8s.api.apps.v1beta2.DaemonSetUpdateStrategy - default: {} -- name: io.k8s.api.apps.v1beta2.DaemonSetStatus +- name: io.k8s.api.apps.v1.ReplicaSetStatus map: fields: - - name: collisionCount + - name: availableReplicas type: scalar: numeric - name: conditions type: list: elementType: - namedType: io.k8s.api.apps.v1beta2.DaemonSetCondition + namedType: io.k8s.api.apps.v1.ReplicaSetCondition elementRelationship: associative keys: - type - - name: currentNumberScheduled + - name: fullyLabeledReplicas type: scalar: numeric - default: 0 - - name: desiredNumberScheduled + - name: observedGeneration type: scalar: numeric - default: 0 - - name: numberAvailable + - name: readyReplicas type: scalar: numeric - - name: numberMisscheduled + - name: replicas type: scalar: numeric default: 0 - - name: numberReady +- name: io.k8s.api.apps.v1.RollingUpdateDaemonSet + map: + fields: + - name: maxSurge type: - scalar: numeric - default: 0 - - name: numberUnavailable + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable type: - scalar: numeric - - name: observedGeneration + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.apps.v1.RollingUpdateDeployment + map: + fields: + - name: maxSurge type: - scalar: numeric - - name: updatedNumberScheduled + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable type: - scalar: numeric -- name: io.k8s.api.apps.v1beta2.DaemonSetUpdateStrategy + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy map: fields: - - name: rollingUpdate + - name: maxUnavailable type: - namedType: io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet - - name: type + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: partition type: - scalar: string -- name: io.k8s.api.apps.v1beta2.Deployment + scalar: numeric +- name: io.k8s.api.apps.v1.StatefulSet map: fields: - name: apiVersion @@ -1453,23 +1697,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.apps.v1beta2.DeploymentSpec + namedType: io.k8s.api.apps.v1.StatefulSetSpec default: {} - name: status type: - namedType: io.k8s.api.apps.v1beta2.DeploymentStatus + namedType: io.k8s.api.apps.v1.StatefulSetStatus default: {} -- name: io.k8s.api.apps.v1beta2.DeploymentCondition +- name: io.k8s.api.apps.v1.StatefulSetCondition map: fields: - name: lastTransitionTime type: 
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: lastUpdateTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1484,18 +1723,37 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.apps.v1beta2.DeploymentSpec +- name: io.k8s.api.apps.v1.StatefulSetOrdinals map: fields: - - name: minReadySeconds + - name: start type: scalar: numeric - - name: paused + default: 0 +- name: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy + map: + fields: + - name: whenDeleted type: - scalar: boolean - - name: progressDeadlineSeconds + scalar: string + - name: whenScaled + type: + scalar: string +- name: io.k8s.api.apps.v1.StatefulSetSpec + map: + fields: + - name: minReadySeconds type: scalar: numeric + - name: ordinals + type: + namedType: io.k8s.api.apps.v1.StatefulSetOrdinals + - name: persistentVolumeClaimRetentionPolicy + type: + namedType: io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy + - name: podManagementPolicy + type: + scalar: string - name: replicas type: scalar: numeric @@ -1505,20 +1763,31 @@ var schemaYAML = typed.YAMLObject(`types: - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: strategy + - name: serviceName type: - namedType: io.k8s.api.apps.v1beta2.DeploymentStrategy - default: {} + scalar: string + default: "" - name: template type: namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} -- name: io.k8s.api.apps.v1beta2.DeploymentStatus + - name: updateStrategy + type: + namedType: io.k8s.api.apps.v1.StatefulSetUpdateStrategy + default: {} + - name: volumeClaimTemplates + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PersistentVolumeClaim + elementRelationship: atomic +- name: io.k8s.api.apps.v1.StatefulSetStatus map: fields: - name: availableReplicas type: scalar: numeric + default: 0 - name: collisionCount type: scalar: numeric @@ -1526,10 +1795,16 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.apps.v1beta2.DeploymentCondition + namedType: io.k8s.api.apps.v1.StatefulSetCondition elementRelationship: associative keys: - type + - name: currentReplicas + type: + scalar: numeric + - name: currentRevision + type: + scalar: string - name: observedGeneration type: scalar: numeric @@ -1539,22 +1814,43 @@ var schemaYAML = typed.YAMLObject(`types: - name: replicas type: scalar: numeric - - name: unavailableReplicas + default: 0 + - name: updateRevision type: - scalar: numeric + scalar: string - name: updatedReplicas type: scalar: numeric -- name: io.k8s.api.apps.v1beta2.DeploymentStrategy +- name: io.k8s.api.apps.v1.StatefulSetUpdateStrategy map: fields: - name: rollingUpdate type: - namedType: io.k8s.api.apps.v1beta2.RollingUpdateDeployment + namedType: io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy - name: type type: scalar: string -- name: io.k8s.api.apps.v1beta2.ReplicaSet +- name: io.k8s.api.apps.v1beta1.ControllerRevision + map: + fields: + - name: apiVersion + type: + scalar: string + - name: data + type: + namedType: __untyped_atomic_ + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: revision + type: + scalar: numeric + default: 0 +- name: io.k8s.api.apps.v1beta1.Deployment map: fields: - name: apiVersion @@ -1569,19 +1865,21 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec 
type: - namedType: io.k8s.api.apps.v1beta2.ReplicaSetSpec + namedType: io.k8s.api.apps.v1beta1.DeploymentSpec default: {} - name: status type: - namedType: io.k8s.api.apps.v1beta2.ReplicaSetStatus + namedType: io.k8s.api.apps.v1beta1.DeploymentStatus default: {} -- name: io.k8s.api.apps.v1beta2.ReplicaSetCondition +- name: io.k8s.api.apps.v1beta1.DeploymentCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: message type: scalar: string @@ -1596,39 +1894,55 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.apps.v1beta2.ReplicaSetSpec +- name: io.k8s.api.apps.v1beta1.DeploymentSpec map: fields: - name: minReadySeconds type: scalar: numeric + - name: paused + type: + scalar: boolean + - name: progressDeadlineSeconds + type: + scalar: numeric - name: replicas type: scalar: numeric + - name: revisionHistoryLimit + type: + scalar: numeric + - name: rollbackTo + type: + namedType: io.k8s.api.apps.v1beta1.RollbackConfig - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: strategy + type: + namedType: io.k8s.api.apps.v1beta1.DeploymentStrategy + default: {} - name: template type: namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} -- name: io.k8s.api.apps.v1beta2.ReplicaSetStatus +- name: io.k8s.api.apps.v1beta1.DeploymentStatus map: fields: - name: availableReplicas type: scalar: numeric + - name: collisionCount + type: + scalar: numeric - name: conditions type: list: elementType: - namedType: io.k8s.api.apps.v1beta2.ReplicaSetCondition + namedType: io.k8s.api.apps.v1beta1.DeploymentCondition elementRelationship: associative keys: - type - - name: fullyLabeledReplicas - type: - scalar: numeric - name: observedGeneration type: scalar: numeric @@ -1638,17 +1952,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: replicas type: scalar: numeric - default: 0 -- name: io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet + - name: unavailableReplicas + type: + scalar: numeric + - name: updatedReplicas + type: + scalar: numeric +- name: io.k8s.api.apps.v1beta1.DeploymentStrategy map: fields: - - name: maxSurge + - name: rollingUpdate type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + namedType: io.k8s.api.apps.v1beta1.RollingUpdateDeployment + - name: type type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.apps.v1beta2.RollingUpdateDeployment + scalar: string +- name: io.k8s.api.apps.v1beta1.RollbackConfig + map: + fields: + - name: revision + type: + scalar: numeric +- name: io.k8s.api.apps.v1beta1.RollingUpdateDeployment map: fields: - name: maxSurge @@ -1657,13 +1982,16 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.apps.v1beta2.RollingUpdateStatefulSetStrategy +- name: io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy map: fields: + - name: maxUnavailable + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - name: partition type: scalar: numeric -- name: io.k8s.api.apps.v1beta2.StatefulSet +- name: io.k8s.api.apps.v1beta1.StatefulSet map: fields: - name: apiVersion @@ -1678,19 +2006,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.apps.v1beta2.StatefulSetSpec + namedType: 
io.k8s.api.apps.v1beta1.StatefulSetSpec default: {} - name: status type: - namedType: io.k8s.api.apps.v1beta2.StatefulSetStatus + namedType: io.k8s.api.apps.v1beta1.StatefulSetStatus default: {} -- name: io.k8s.api.apps.v1beta2.StatefulSetCondition +- name: io.k8s.api.apps.v1beta1.StatefulSetCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -1705,7 +2032,14 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy +- name: io.k8s.api.apps.v1beta1.StatefulSetOrdinals + map: + fields: + - name: start + type: + scalar: numeric + default: 0 +- name: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy map: fields: - name: whenDeleted @@ -1714,15 +2048,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: whenScaled type: scalar: string -- name: io.k8s.api.apps.v1beta2.StatefulSetSpec +- name: io.k8s.api.apps.v1beta1.StatefulSetSpec map: fields: - name: minReadySeconds type: scalar: numeric + - name: ordinals + type: + namedType: io.k8s.api.apps.v1beta1.StatefulSetOrdinals - name: persistentVolumeClaimRetentionPolicy type: - namedType: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy + namedType: io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy - name: podManagementPolicy type: scalar: string @@ -1745,7 +2082,7 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: updateStrategy type: - namedType: io.k8s.api.apps.v1beta2.StatefulSetUpdateStrategy + namedType: io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy default: {} - name: volumeClaimTemplates type: @@ -1753,7 +2090,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.PersistentVolumeClaim elementRelationship: atomic -- name: io.k8s.api.apps.v1beta2.StatefulSetStatus +- name: io.k8s.api.apps.v1beta1.StatefulSetStatus map: fields: - name: availableReplicas @@ -1767,7 +2104,7 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.apps.v1beta2.StatefulSetCondition + namedType: io.k8s.api.apps.v1beta1.StatefulSetCondition elementRelationship: associative keys: - type @@ -1793,31 +2130,36 @@ var schemaYAML = typed.YAMLObject(`types: - name: updatedReplicas type: scalar: numeric -- name: io.k8s.api.apps.v1beta2.StatefulSetUpdateStrategy +- name: io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy map: fields: - name: rollingUpdate type: - namedType: io.k8s.api.apps.v1beta2.RollingUpdateStatefulSetStrategy + namedType: io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy - name: type type: scalar: string -- name: io.k8s.api.autoscaling.v1.CrossVersionObjectReference +- name: io.k8s.api.apps.v1beta2.ControllerRevision map: fields: - name: apiVersion type: scalar: string + - name: data + type: + namedType: __untyped_atomic_ - name: kind type: scalar: string - default: "" - - name: name + - name: metadata type: - scalar: string - default: "" - elementRelationship: atomic -- name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: revision + type: + scalar: numeric + default: 0 +- name: io.k8s.api.apps.v1beta2.DaemonSet map: fields: - name: apiVersion @@ -1832,146 +2174,218 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: 
io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec + namedType: io.k8s.api.apps.v1beta2.DaemonSetSpec default: {} - name: status type: - namedType: io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus + namedType: io.k8s.api.apps.v1beta2.DaemonSetStatus default: {} -- name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec +- name: io.k8s.api.apps.v1beta2.DaemonSetCondition map: fields: - - name: maxReplicas + - name: lastTransitionTime type: - scalar: numeric - default: 0 - - name: minReplicas + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message type: - scalar: numeric - - name: scaleTargetRef + scalar: string + - name: reason type: - namedType: io.k8s.api.autoscaling.v1.CrossVersionObjectReference - default: {} - - name: targetCPUUtilizationPercentage + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.apps.v1beta2.DaemonSetSpec + map: + fields: + - name: minReadySeconds type: scalar: numeric -- name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus + - name: revisionHistoryLimit + type: + scalar: numeric + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec + default: {} + - name: updateStrategy + type: + namedType: io.k8s.api.apps.v1beta2.DaemonSetUpdateStrategy + default: {} +- name: io.k8s.api.apps.v1beta2.DaemonSetStatus map: fields: - - name: currentCPUUtilizationPercentage + - name: collisionCount type: scalar: numeric - - name: currentReplicas + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.apps.v1beta2.DaemonSetCondition + elementRelationship: associative + keys: + - type + - name: currentNumberScheduled type: scalar: numeric default: 0 - - name: desiredReplicas + - name: desiredNumberScheduled type: scalar: numeric default: 0 - - name: lastScaleTime + - name: numberAvailable type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: numeric + - name: numberMisscheduled + type: + scalar: numeric + default: 0 + - name: numberReady + type: + scalar: numeric + default: 0 + - name: numberUnavailable + type: + scalar: numeric - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource + - name: updatedNumberScheduled + type: + scalar: numeric +- name: io.k8s.api.apps.v1beta2.DaemonSetUpdateStrategy map: fields: - - name: container + - name: rollingUpdate type: - scalar: string - default: "" - - name: name + namedType: io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet + - name: type type: scalar: string - default: "" - - name: target - type: - namedType: io.k8s.api.autoscaling.v2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus +- name: io.k8s.api.apps.v1beta2.Deployment map: fields: - - name: container + - name: apiVersion type: scalar: string - default: "" - - name: current + - name: kind type: - namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: name + - name: spec type: - scalar: string - default: "" -- name: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + namedType: io.k8s.api.apps.v1beta2.DeploymentSpec + default: {} + - name: status + type: + namedType: io.k8s.api.apps.v1beta2.DeploymentStatus + default: {} +- name: io.k8s.api.apps.v1beta2.DeploymentCondition map: 
fields: - - name: apiVersion + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message type: scalar: string - - name: kind + - name: reason + type: + scalar: string + - name: status type: scalar: string default: "" - - name: name + - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2.ExternalMetricSource +- name: io.k8s.api.apps.v1beta2.DeploymentSpec map: fields: - - name: metric + - name: minReadySeconds type: - namedType: io.k8s.api.autoscaling.v2.MetricIdentifier - default: {} - - name: target + scalar: numeric + - name: paused type: - namedType: io.k8s.api.autoscaling.v2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2.ExternalMetricStatus - map: - fields: - - name: current + scalar: boolean + - name: progressDeadlineSeconds type: - namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + scalar: numeric + - name: replicas + type: + scalar: numeric + - name: revisionHistoryLimit + type: + scalar: numeric + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: strategy + type: + namedType: io.k8s.api.apps.v1beta2.DeploymentStrategy default: {} - - name: metric + - name: template type: - namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} -- name: io.k8s.api.autoscaling.v2.HPAScalingPolicy +- name: io.k8s.api.apps.v1beta2.DeploymentStatus map: fields: - - name: periodSeconds + - name: availableReplicas type: scalar: numeric - default: 0 - - name: type - type: - scalar: string - default: "" - - name: value + - name: collisionCount type: scalar: numeric - default: 0 -- name: io.k8s.api.autoscaling.v2.HPAScalingRules - map: - fields: - - name: policies + - name: conditions type: list: elementType: - namedType: io.k8s.api.autoscaling.v2.HPAScalingPolicy - elementRelationship: atomic - - name: selectPolicy + namedType: io.k8s.api.apps.v1beta2.DeploymentCondition + elementRelationship: associative + keys: + - type + - name: observedGeneration type: - scalar: string - - name: stabilizationWindowSeconds + scalar: numeric + - name: readyReplicas type: scalar: numeric -- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler + - name: replicas + type: + scalar: numeric + - name: unavailableReplicas + type: + scalar: numeric + - name: updatedReplicas + type: + scalar: numeric +- name: io.k8s.api.apps.v1beta2.DeploymentStrategy + map: + fields: + - name: rollingUpdate + type: + namedType: io.k8s.api.apps.v1beta2.RollingUpdateDeployment + - name: type + type: + scalar: string +- name: io.k8s.api.apps.v1beta2.ReplicaSet map: fields: - name: apiVersion @@ -1986,28 +2400,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec + namedType: io.k8s.api.apps.v1beta2.ReplicaSetSpec default: {} - name: status type: - namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus + namedType: io.k8s.api.apps.v1beta2.ReplicaSetStatus default: {} -- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior - map: - fields: - - name: scaleDown - type: - namedType: io.k8s.api.autoscaling.v2.HPAScalingRules - - name: scaleUp - type: - namedType: io.k8s.api.autoscaling.v2.HPAScalingRules -- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition +- name: io.k8s.api.apps.v1beta2.ReplicaSetCondition map: fields: - name: 
lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2022,251 +2426,225 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec +- name: io.k8s.api.apps.v1beta2.ReplicaSetSpec map: fields: - - name: behavior - type: - namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior - - name: maxReplicas + - name: minReadySeconds type: scalar: numeric - default: 0 - - name: metrics - type: - list: - elementType: - namedType: io.k8s.api.autoscaling.v2.MetricSpec - elementRelationship: atomic - - name: minReplicas + - name: replicas type: scalar: numeric - - name: scaleTargetRef + - name: selector type: - namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} -- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus +- name: io.k8s.api.apps.v1beta2.ReplicaSetStatus map: fields: + - name: availableReplicas + type: + scalar: numeric - name: conditions type: list: elementType: - namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition + namedType: io.k8s.api.apps.v1beta2.ReplicaSetCondition elementRelationship: associative keys: - type - - name: currentMetrics + - name: fullyLabeledReplicas type: - list: - elementType: - namedType: io.k8s.api.autoscaling.v2.MetricStatus - elementRelationship: atomic - - name: currentReplicas + scalar: numeric + - name: observedGeneration type: scalar: numeric - - name: desiredReplicas + - name: readyReplicas type: scalar: numeric - default: 0 - - name: lastScaleTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: observedGeneration + - name: replicas type: scalar: numeric -- name: io.k8s.api.autoscaling.v2.MetricIdentifier + default: 0 +- name: io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet map: fields: - - name: name + - name: maxSurge type: - scalar: string - default: "" - - name: selector + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: io.k8s.api.autoscaling.v2.MetricSpec + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.apps.v1beta2.RollingUpdateDeployment map: fields: - - name: containerResource - type: - namedType: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource - - name: external - type: - namedType: io.k8s.api.autoscaling.v2.ExternalMetricSource - - name: object + - name: maxSurge type: - namedType: io.k8s.api.autoscaling.v2.ObjectMetricSource - - name: pods + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable type: - namedType: io.k8s.api.autoscaling.v2.PodsMetricSource - - name: resource + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.apps.v1beta2.RollingUpdateStatefulSetStrategy + map: + fields: + - name: maxUnavailable type: - namedType: io.k8s.api.autoscaling.v2.ResourceMetricSource - - name: type + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: partition type: - scalar: string - default: "" -- name: io.k8s.api.autoscaling.v2.MetricStatus + scalar: numeric +- name: io.k8s.api.apps.v1beta2.StatefulSet map: fields: - - name: containerResource - type: - namedType: io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus - - name: external + - 
name: apiVersion type: - namedType: io.k8s.api.autoscaling.v2.ExternalMetricStatus - - name: object + scalar: string + - name: kind type: - namedType: io.k8s.api.autoscaling.v2.ObjectMetricStatus - - name: pods + scalar: string + - name: metadata type: - namedType: io.k8s.api.autoscaling.v2.PodsMetricStatus - - name: resource + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - namedType: io.k8s.api.autoscaling.v2.ResourceMetricStatus - - name: type + namedType: io.k8s.api.apps.v1beta2.StatefulSetSpec + default: {} + - name: status type: - scalar: string - default: "" -- name: io.k8s.api.autoscaling.v2.MetricTarget + namedType: io.k8s.api.apps.v1beta2.StatefulSetStatus + default: {} +- name: io.k8s.api.apps.v1beta2.StatefulSetCondition map: fields: - - name: averageUtilization + - name: lastTransitionTime type: - scalar: numeric - - name: averageValue + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: type + scalar: string + - name: reason + type: + scalar: string + - name: status type: scalar: string default: "" - - name: value + - name: type type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2.MetricValueStatus + scalar: string + default: "" +- name: io.k8s.api.apps.v1beta2.StatefulSetOrdinals map: fields: - - name: averageUtilization + - name: start type: scalar: numeric - - name: averageValue - type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: value - type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2.ObjectMetricSource + default: 0 +- name: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy map: fields: - - name: describedObject - type: - namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference - default: {} - - name: metric + - name: whenDeleted type: - namedType: io.k8s.api.autoscaling.v2.MetricIdentifier - default: {} - - name: target + scalar: string + - name: whenScaled type: - namedType: io.k8s.api.autoscaling.v2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2.ObjectMetricStatus + scalar: string +- name: io.k8s.api.apps.v1beta2.StatefulSetSpec map: fields: - - name: current + - name: minReadySeconds type: - namedType: io.k8s.api.autoscaling.v2.MetricValueStatus - default: {} - - name: describedObject + scalar: numeric + - name: ordinals type: - namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference - default: {} - - name: metric + namedType: io.k8s.api.apps.v1beta2.StatefulSetOrdinals + - name: persistentVolumeClaimRetentionPolicy type: - namedType: io.k8s.api.autoscaling.v2.MetricIdentifier - default: {} -- name: io.k8s.api.autoscaling.v2.PodsMetricSource - map: - fields: - - name: metric + namedType: io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy + - name: podManagementPolicy type: - namedType: io.k8s.api.autoscaling.v2.MetricIdentifier - default: {} - - name: target + scalar: string + - name: replicas type: - namedType: io.k8s.api.autoscaling.v2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2.PodsMetricStatus - map: - fields: - - name: current + scalar: numeric + - name: revisionHistoryLimit type: - namedType: io.k8s.api.autoscaling.v2.MetricValueStatus - default: {} - - name: metric + scalar: numeric + - name: selector type: - namedType: io.k8s.api.autoscaling.v2.MetricIdentifier - default: {} -- name: 
io.k8s.api.autoscaling.v2.ResourceMetricSource - map: - fields: - - name: name + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: serviceName type: scalar: string default: "" - - name: target + - name: template type: - namedType: io.k8s.api.autoscaling.v2.MetricTarget + namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} -- name: io.k8s.api.autoscaling.v2.ResourceMetricStatus - map: - fields: - - name: current + - name: updateStrategy type: - namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + namedType: io.k8s.api.apps.v1beta2.StatefulSetUpdateStrategy default: {} - - name: name + - name: volumeClaimTemplates type: - scalar: string - default: "" -- name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource + list: + elementType: + namedType: io.k8s.api.core.v1.PersistentVolumeClaim + elementRelationship: atomic +- name: io.k8s.api.apps.v1beta2.StatefulSetStatus map: fields: - - name: container + - name: availableReplicas type: - scalar: string - default: "" - - name: name + scalar: numeric + default: 0 + - name: collisionCount + type: + scalar: numeric + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.apps.v1beta2.StatefulSetCondition + elementRelationship: associative + keys: + - type + - name: currentReplicas + type: + scalar: numeric + - name: currentRevision type: scalar: string - default: "" - - name: targetAverageUtilization + - name: observedGeneration type: scalar: numeric - - name: targetAverageValue + - name: readyReplicas type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus - map: - fields: - - name: container + scalar: numeric + - name: replicas + type: + scalar: numeric + default: 0 + - name: updateRevision type: scalar: string - default: "" - - name: currentAverageUtilization + - name: updatedReplicas type: scalar: numeric - - name: currentAverageValue +- name: io.k8s.api.apps.v1beta2.StatefulSetUpdateStrategy + map: + fields: + - name: rollingUpdate type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - - name: name + namedType: io.k8s.api.apps.v1beta2.RollingUpdateStatefulSetStrategy + - name: type type: scalar: string - default: "" -- name: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference +- name: io.k8s.api.autoscaling.v1.CrossVersionObjectReference map: fields: - name: apiVersion @@ -2280,40 +2658,162 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource + elementRelationship: atomic +- name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler map: fields: - - name: metricName + - name: apiVersion type: scalar: string - default: "" - - name: metricSelector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: targetAverageValue + - name: kind type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: targetValue + scalar: string + - name: metadata type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec + default: {} + - name: status + type: + namedType: io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus + default: {} +- name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec map: fields: - - name: currentAverageValue + - 
name: maxReplicas type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: currentValue + scalar: numeric + default: 0 + - name: minReplicas type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + scalar: numeric + - name: scaleTargetRef + type: + namedType: io.k8s.api.autoscaling.v1.CrossVersionObjectReference default: {} - - name: metricName + - name: targetCPUUtilizationPercentage + type: + scalar: numeric +- name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus + map: + fields: + - name: currentCPUUtilizationPercentage + type: + scalar: numeric + - name: currentReplicas + type: + scalar: numeric + default: 0 + - name: desiredReplicas + type: + scalar: numeric + default: 0 + - name: lastScaleTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: observedGeneration + type: + scalar: numeric +- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource + map: + fields: + - name: container type: scalar: string default: "" - - name: metricSelector + - name: name type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler + scalar: string + default: "" + - name: target + type: + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus + map: + fields: + - name: container + type: + scalar: string + default: "" + - name: current + type: + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2.ExternalMetricSource + map: + fields: + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} + - name: target + type: + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ExternalMetricStatus + map: + fields: + - name: current + type: + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} +- name: io.k8s.api.autoscaling.v2.HPAScalingPolicy + map: + fields: + - name: periodSeconds + type: + scalar: numeric + default: 0 + - name: type + type: + scalar: string + default: "" + - name: value + type: + scalar: numeric + default: 0 +- name: io.k8s.api.autoscaling.v2.HPAScalingRules + map: + fields: + - name: policies + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2.HPAScalingPolicy + elementRelationship: atomic + - name: selectPolicy + type: + scalar: string + - name: stabilizationWindowSeconds + type: + scalar: numeric +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler map: fields: - name: apiVersion @@ -2328,19 +2828,27 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec default: {} - name: status type: - namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus default: {} -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition +- name: 
io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior + map: + fields: + - name: scaleDown + type: + namedType: io.k8s.api.autoscaling.v2.HPAScalingRules + - name: scaleUp + type: + namedType: io.k8s.api.autoscaling.v2.HPAScalingRules +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2355,9 +2863,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec map: fields: + - name: behavior + type: + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior - name: maxReplicas type: scalar: numeric @@ -2366,34 +2877,35 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta1.MetricSpec + namedType: io.k8s.api.autoscaling.v2.MetricSpec elementRelationship: atomic - name: minReplicas type: scalar: numeric - name: scaleTargetRef type: - namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference default: {} -- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus +- name: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition - elementRelationship: atomic + namedType: io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition + elementRelationship: associative + keys: + - type - name: currentMetrics type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta1.MetricStatus + namedType: io.k8s.api.autoscaling.v2.MetricStatus elementRelationship: atomic - name: currentReplicas type: scalar: numeric - default: 0 - name: desiredReplicas type: scalar: numeric @@ -2404,148 +2916,163 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.autoscaling.v2beta1.MetricSpec +- name: io.k8s.api.autoscaling.v2.MetricIdentifier + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2.MetricSpec map: fields: - name: containerResource type: - namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource + namedType: io.k8s.api.autoscaling.v2.ContainerResourceMetricSource - name: external type: - namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource + namedType: io.k8s.api.autoscaling.v2.ExternalMetricSource - name: object type: - namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource + namedType: io.k8s.api.autoscaling.v2.ObjectMetricSource - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricSource + namedType: io.k8s.api.autoscaling.v2.PodsMetricSource - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricSource + namedType: io.k8s.api.autoscaling.v2.ResourceMetricSource - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.MetricStatus +- name: io.k8s.api.autoscaling.v2.MetricStatus map: fields: - name: containerResource type: - namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus - name: external type: - 
namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus + namedType: io.k8s.api.autoscaling.v2.ExternalMetricStatus - name: object type: - namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus + namedType: io.k8s.api.autoscaling.v2.ObjectMetricStatus - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus + namedType: io.k8s.api.autoscaling.v2.PodsMetricStatus - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2.ResourceMetricStatus - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource +- name: io.k8s.api.autoscaling.v2.MetricTarget map: fields: + - name: averageUtilization + type: + scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: metricName + - name: type type: scalar: string default: "" - - name: selector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: target - type: - namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference - default: {} - - name: targetValue + - name: value type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} -- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus +- name: io.k8s.api.autoscaling.v2.MetricValueStatus map: fields: + - name: averageUtilization + type: + scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: currentValue + - name: value type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - - name: metricName +- name: io.k8s.api.autoscaling.v2.ObjectMetricSource + map: + fields: + - name: describedObject type: - scalar: string - default: "" - - name: selector + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + default: {} + - name: metric type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} - name: target type: - namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference + namedType: io.k8s.api.autoscaling.v2.MetricTarget default: {} -- name: io.k8s.api.autoscaling.v2beta1.PodsMetricSource +- name: io.k8s.api.autoscaling.v2.ObjectMetricStatus map: fields: - - name: metricName + - name: current type: - scalar: string - default: "" - - name: selector + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: describedObject type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: targetAverageValue + namedType: io.k8s.api.autoscaling.v2.CrossVersionObjectReference + default: {} + - name: metric type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier default: {} -- name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus +- name: io.k8s.api.autoscaling.v2.PodsMetricSource map: fields: - - name: currentAverageValue + - name: metric type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier default: {} - - name: metricName + - name: target type: - scalar: string - default: "" - - name: selector + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.PodsMetricStatus + map: + fields: + - name: current type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: io.k8s.api.autoscaling.v2beta1.ResourceMetricSource + namedType: 
io.k8s.api.autoscaling.v2.MetricValueStatus + default: {} + - name: metric + type: + namedType: io.k8s.api.autoscaling.v2.MetricIdentifier + default: {} +- name: io.k8s.api.autoscaling.v2.ResourceMetricSource map: fields: - name: name type: scalar: string default: "" - - name: targetAverageUtilization - type: - scalar: numeric - - name: targetAverageValue + - name: target type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2.ResourceMetricStatus map: fields: - - name: currentAverageUtilization - type: - scalar: numeric - - name: currentAverageValue + - name: current type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + namedType: io.k8s.api.autoscaling.v2.MetricValueStatus default: {} - name: name type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource +- name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource map: fields: - name: container @@ -2556,26 +3083,30 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - - name: target + - name: targetAverageUtilization type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus + scalar: numeric + - name: targetAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus map: fields: - name: container type: scalar: string default: "" - - name: current + - name: currentAverageUtilization type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus - default: {} + scalar: numeric + - name: currentAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: name type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference +- name: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference map: fields: - name: apiVersion @@ -2589,59 +3120,39 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource +- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource map: fields: - - name: metric + - name: metricName type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} - - name: target + scalar: string + default: "" + - name: metricSelector type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus - map: - fields: - - name: current + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: targetAverageValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus - default: {} - - name: metric + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: targetValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} -- name: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus map: fields: - - name: periodSeconds - type: - scalar: numeric - default: 0 - - name: type - type: - scalar: string - default: "" - - name: value + - name: currentAverageValue type: - scalar: numeric - default: 0 -- name: io.k8s.api.autoscaling.v2beta2.HPAScalingRules - map: - fields: - - name: policies + 
namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: currentValue type: - list: - elementType: - namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy - elementRelationship: atomic - - name: selectPolicy + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: metricName type: scalar: string - - name: stabilizationWindowSeconds + default: "" + - name: metricSelector type: - scalar: numeric -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler map: fields: - name: apiVersion @@ -2656,28 +3167,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec + namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec default: {} - name: status type: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus + namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus default: {} -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior - map: - fields: - - name: scaleDown - type: - namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules - - name: scaleUp - type: - namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -2692,12 +3193,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec map: fields: - - name: behavior - type: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior - name: maxReplicas type: scalar: numeric @@ -2706,29 +3204,29 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta2.MetricSpec + namedType: io.k8s.api.autoscaling.v2beta1.MetricSpec elementRelationship: atomic - name: minReplicas type: scalar: numeric - name: scaleTargetRef type: - namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference + namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference default: {} -- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus +- name: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition + namedType: io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition elementRelationship: atomic - name: currentMetrics type: list: elementType: - namedType: io.k8s.api.autoscaling.v2beta2.MetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.MetricStatus elementRelationship: atomic - name: currentReplicas type: @@ -2744,143 +3242,149 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - map: - fields: - - name: name - type: - scalar: string - default: "" - - name: selector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: io.k8s.api.autoscaling.v2beta2.MetricSpec +- name: io.k8s.api.autoscaling.v2beta1.MetricSpec map: fields: - name: 
containerResource type: - namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource - name: external type: - namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricSource - name: object type: - namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricSource - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricSource - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.MetricStatus +- name: io.k8s.api.autoscaling.v2beta1.MetricStatus map: fields: - name: containerResource type: - namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus - name: external type: - namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus - name: object type: - namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus - name: pods type: - namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus - name: resource type: - namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus + namedType: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus - name: type type: scalar: string default: "" -- name: io.k8s.api.autoscaling.v2beta2.MetricTarget +- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricSource map: fields: - - name: averageUtilization - type: - scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: type + - name: metricName type: scalar: string default: "" - - name: value + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: target + type: + namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference + default: {} + - name: targetValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta2.MetricValueStatus +- name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus map: fields: - - name: averageUtilization - type: - scalar: numeric - name: averageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: value + - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource - map: - fields: - - name: describedObject + - name: metricName type: - namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference - default: {} - - name: metric + scalar: string + default: "" + - name: selector type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - name: target type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget + namedType: io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference default: {} -- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus +- name: io.k8s.api.autoscaling.v2beta1.PodsMetricSource map: fields: - - name: current + - name: metricName type: - namedType: 
io.k8s.api.autoscaling.v2beta2.MetricValueStatus - default: {} - - name: describedObject + scalar: string + default: "" + - name: selector type: - namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference - default: {} - - name: metric + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: targetAverageValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} -- name: io.k8s.api.autoscaling.v2beta2.PodsMetricSource + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus map: fields: - - name: metric + - name: currentAverageValue type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} - - name: target + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: metricName type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget - default: {} -- name: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus + scalar: string + default: "" + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2beta1.ResourceMetricSource map: fields: - - name: current + - name: name type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus - default: {} - - name: metric + scalar: string + default: "" + - name: targetAverageUtilization type: - namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier - default: {} -- name: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + scalar: numeric + - name: targetAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus + map: + fields: + - name: currentAverageUtilization + type: + scalar: numeric + - name: currentAverageValue + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource map: fields: + - name: container + type: + scalar: string + default: "" - name: name type: scalar: string @@ -2889,9 +3393,13 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget default: {} -- name: io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus +- name: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus map: fields: + - name: container + type: + scalar: string + default: "" - name: current type: namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus @@ -2900,7 +3408,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.batch.v1.CronJob +- name: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference map: fields: - name: apiVersion @@ -2909,60 +3417,64 @@ var schemaYAML = typed.YAMLObject(`types: - name: kind type: scalar: string - - name: metadata + default: "" + - name: name type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource + map: + fields: + - name: metric type: - namedType: io.k8s.api.batch.v1.CronJobSpec + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: status + - name: target type: - namedType: io.k8s.api.batch.v1.CronJobStatus + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget default: {} -- name: io.k8s.api.batch.v1.CronJobSpec +- name: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus map: fields: - - name: concurrencyPolicy - type: - scalar: 
string - - name: failedJobsHistoryLimit + - name: current type: - scalar: numeric - - name: jobTemplate + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + default: {} + - name: metric type: - namedType: io.k8s.api.batch.v1.JobTemplateSpec + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: schedule +- name: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy + map: + fields: + - name: periodSeconds + type: + scalar: numeric + default: 0 + - name: type type: scalar: string default: "" - - name: startingDeadlineSeconds - type: - scalar: numeric - - name: successfulJobsHistoryLimit + - name: value type: scalar: numeric - - name: suspend - type: - scalar: boolean -- name: io.k8s.api.batch.v1.CronJobStatus + default: 0 +- name: io.k8s.api.autoscaling.v2beta2.HPAScalingRules map: fields: - - name: active + - name: policies type: list: elementType: - namedType: io.k8s.api.core.v1.ObjectReference + namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy elementRelationship: atomic - - name: lastScheduleTime + - name: selectPolicy type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: lastSuccessfulTime + scalar: string + - name: stabilizationWindowSeconds type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time -- name: io.k8s.api.batch.v1.Job + scalar: numeric +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler map: fields: - name: apiVersion @@ -2977,23 +3489,27 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.batch.v1.JobSpec + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec default: {} - name: status type: - namedType: io.k8s.api.batch.v1.JobStatus + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus default: {} -- name: io.k8s.api.batch.v1.JobCondition +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior map: fields: - - name: lastProbeTime + - name: scaleDown type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} + namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules + - name: scaleUp + type: + namedType: io.k8s.api.autoscaling.v2beta2.HPAScalingRules +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition + map: + fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3008,272 +3524,280 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.batch.v1.JobSpec +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec map: fields: - - name: activeDeadlineSeconds - type: - scalar: numeric - - name: backoffLimit - type: - scalar: numeric - - name: completionMode + - name: behavior type: - scalar: string - - name: completions + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior + - name: maxReplicas type: scalar: numeric - - name: manualSelector + default: 0 + - name: metrics type: - scalar: boolean - - name: parallelism + list: + elementType: + namedType: io.k8s.api.autoscaling.v2beta2.MetricSpec + elementRelationship: atomic + - name: minReplicas type: scalar: numeric - - name: selector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: suspend - type: - scalar: boolean - - name: template + - name: scaleTargetRef type: - namedType: io.k8s.api.core.v1.PodTemplateSpec + namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference default: {} - - name: 
ttlSecondsAfterFinished - type: - scalar: numeric -- name: io.k8s.api.batch.v1.JobStatus +- name: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus map: fields: - - name: active - type: - scalar: numeric - - name: completedIndexes - type: - scalar: string - - name: completionTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: conditions type: list: elementType: - namedType: io.k8s.api.batch.v1.JobCondition + namedType: io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition elementRelationship: atomic - - name: failed + - name: currentMetrics + type: + list: + elementType: + namedType: io.k8s.api.autoscaling.v2beta2.MetricStatus + elementRelationship: atomic + - name: currentReplicas type: scalar: numeric - - name: ready + default: 0 + - name: desiredReplicas type: scalar: numeric - - name: startTime + default: 0 + - name: lastScaleTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: succeeded + - name: observedGeneration type: scalar: numeric - - name: uncountedTerminatedPods - type: - namedType: io.k8s.api.batch.v1.UncountedTerminatedPods -- name: io.k8s.api.batch.v1.JobTemplateSpec +- name: io.k8s.api.autoscaling.v2beta2.MetricIdentifier map: fields: - - name: metadata + - name: name type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + scalar: string + default: "" + - name: selector type: - namedType: io.k8s.api.batch.v1.JobSpec - default: {} -- name: io.k8s.api.batch.v1.UncountedTerminatedPods + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.autoscaling.v2beta2.MetricSpec map: fields: - - name: failed + - name: containerResource type: - list: - elementType: - scalar: string - elementRelationship: associative - - name: succeeded + namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource + - name: external type: - list: - elementType: - scalar: string - elementRelationship: associative -- name: io.k8s.api.batch.v1beta1.CronJob - map: - fields: - - name: apiVersion + namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricSource + - name: object type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata + namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource + - name: pods type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec + namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricSource + - name: resource type: - namedType: io.k8s.api.batch.v1beta1.CronJobSpec - default: {} - - name: status + namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + - name: type type: - namedType: io.k8s.api.batch.v1beta1.CronJobStatus - default: {} -- name: io.k8s.api.batch.v1beta1.CronJobSpec + scalar: string + default: "" +- name: io.k8s.api.autoscaling.v2beta2.MetricStatus map: fields: - - name: concurrencyPolicy + - name: containerResource type: - scalar: string - - name: failedJobsHistoryLimit + namedType: io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus + - name: external type: - scalar: numeric - - name: jobTemplate + namedType: io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus + - name: object type: - namedType: io.k8s.api.batch.v1beta1.JobTemplateSpec - default: {} - - name: schedule + namedType: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus + - name: pods + type: + namedType: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus + - name: resource + type: + namedType: io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus + - name: type type: scalar: 
string default: "" - - name: startingDeadlineSeconds +- name: io.k8s.api.autoscaling.v2beta2.MetricTarget + map: + fields: + - name: averageUtilization type: scalar: numeric - - name: successfulJobsHistoryLimit + - name: averageValue type: - scalar: numeric - - name: suspend + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: type type: - scalar: boolean -- name: io.k8s.api.batch.v1beta1.CronJobStatus + scalar: string + default: "" + - name: value + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta2.MetricValueStatus map: fields: - - name: active + - name: averageUtilization type: - list: - elementType: - namedType: io.k8s.api.core.v1.ObjectReference - elementRelationship: atomic - - name: lastScheduleTime + scalar: numeric + - name: averageValue type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - - name: lastSuccessfulTime + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: value type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time -- name: io.k8s.api.batch.v1beta1.JobTemplateSpec + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricSource map: fields: - - name: metadata + - name: describedObject type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference default: {} - - name: spec + - name: metric type: - namedType: io.k8s.api.batch.v1.JobSpec + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} -- name: io.k8s.api.certificates.v1.CertificateSigningRequest + - name: target + type: + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget + default: {} +- name: io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus map: fields: - - name: apiVersion + - name: current type: - scalar: string - - name: kind + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + default: {} + - name: describedObject type: - scalar: string - - name: metadata + namedType: io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference + default: {} + - name: metric type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: spec +- name: io.k8s.api.autoscaling.v2beta2.PodsMetricSource + map: + fields: + - name: metric type: - namedType: io.k8s.api.certificates.v1.CertificateSigningRequestSpec + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: status + - name: target type: - namedType: io.k8s.api.certificates.v1.CertificateSigningRequestStatus + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget default: {} -- name: io.k8s.api.certificates.v1.CertificateSigningRequestCondition +- name: io.k8s.api.autoscaling.v2beta2.PodsMetricStatus map: fields: - - name: lastTransitionTime + - name: current type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus default: {} - - name: lastUpdateTime + - name: metric type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + namedType: io.k8s.api.autoscaling.v2beta2.MetricIdentifier default: {} - - name: message +- name: io.k8s.api.autoscaling.v2beta2.ResourceMetricSource + map: + fields: + - name: name type: scalar: string - - name: reason + default: "" + - name: target type: - scalar: string - - name: status + namedType: io.k8s.api.autoscaling.v2beta2.MetricTarget + default: {} +- name: 
io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus + map: + fields: + - name: current type: - scalar: string - default: "" - - name: type + namedType: io.k8s.api.autoscaling.v2beta2.MetricValueStatus + default: {} + - name: name type: scalar: string default: "" -- name: io.k8s.api.certificates.v1.CertificateSigningRequestSpec +- name: io.k8s.api.batch.v1.CronJob map: fields: - - name: expirationSeconds + - name: apiVersion type: - scalar: numeric - - name: extra + scalar: string + - name: kind type: - map: - elementType: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: groups + scalar: string + - name: metadata type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: request + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.batch.v1.CronJobSpec + default: {} + - name: status + type: + namedType: io.k8s.api.batch.v1.CronJobStatus + default: {} +- name: io.k8s.api.batch.v1.CronJobSpec + map: + fields: + - name: concurrencyPolicy type: scalar: string - - name: signerName + - name: failedJobsHistoryLimit + type: + scalar: numeric + - name: jobTemplate + type: + namedType: io.k8s.api.batch.v1.JobTemplateSpec + default: {} + - name: schedule type: scalar: string default: "" - - name: uid + - name: startingDeadlineSeconds type: - scalar: string - - name: usages + scalar: numeric + - name: successfulJobsHistoryLimit type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: username + scalar: numeric + - name: suspend + type: + scalar: boolean + - name: timeZone type: scalar: string -- name: io.k8s.api.certificates.v1.CertificateSigningRequestStatus +- name: io.k8s.api.batch.v1.CronJobStatus map: fields: - - name: certificate - type: - scalar: string - - name: conditions + - name: active type: list: elementType: - namedType: io.k8s.api.certificates.v1.CertificateSigningRequestCondition - elementRelationship: associative - keys: - - type -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest + namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: atomic + - name: lastScheduleTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastSuccessfulTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.api.batch.v1.Job map: fields: - name: apiVersion @@ -3288,23 +3812,21 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec + namedType: io.k8s.api.batch.v1.JobSpec default: {} - name: status type: - namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus + namedType: io.k8s.api.batch.v1.JobStatus default: {} -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition +- name: io.k8s.api.batch.v1.JobCondition map: fields: - - name: lastTransitionTime + - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: lastUpdateTime + - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -3319,434 +3841,372 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec +- name: io.k8s.api.batch.v1.JobSpec map: fields: - - name: expirationSeconds + - name: activeDeadlineSeconds type: scalar: numeric - - name: extra + - name: backoffLimit 
type: - map: - elementType: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: groups + scalar: numeric + - name: backoffLimitPerIndex type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: request + scalar: numeric + - name: completionMode type: scalar: string - - name: signerName + - name: completions type: - scalar: string - - name: uid + scalar: numeric + - name: managedBy type: scalar: string - - name: usages + - name: manualSelector type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: username + scalar: boolean + - name: maxFailedIndexes + type: + scalar: numeric + - name: parallelism + type: + scalar: numeric + - name: podFailurePolicy + type: + namedType: io.k8s.api.batch.v1.PodFailurePolicy + - name: podReplacementPolicy type: scalar: string -- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: successPolicy + type: + namedType: io.k8s.api.batch.v1.SuccessPolicy + - name: suspend + type: + scalar: boolean + - name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec + default: {} + - name: ttlSecondsAfterFinished + type: + scalar: numeric +- name: io.k8s.api.batch.v1.JobStatus map: fields: - - name: certificate + - name: active + type: + scalar: numeric + - name: completedIndexes type: scalar: string + - name: completionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: conditions type: list: elementType: - namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition - elementRelationship: associative - keys: - - type -- name: io.k8s.api.coordination.v1.Lease - map: - fields: - - name: apiVersion + namedType: io.k8s.api.batch.v1.JobCondition + elementRelationship: atomic + - name: failed type: - scalar: string - - name: kind + scalar: numeric + - name: failedIndexes type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.coordination.v1.LeaseSpec - default: {} -- name: io.k8s.api.coordination.v1.LeaseSpec - map: - fields: - - name: acquireTime + - name: ready type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - - name: holderIdentity + scalar: numeric + - name: startTime type: - scalar: string - - name: leaseDurationSeconds + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: succeeded type: scalar: numeric - - name: leaseTransitions + - name: terminating type: scalar: numeric - - name: renewTime + - name: uncountedTerminatedPods type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime -- name: io.k8s.api.coordination.v1beta1.Lease + namedType: io.k8s.api.batch.v1.UncountedTerminatedPods +- name: io.k8s.api.batch.v1.JobTemplateSpec map: fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - name: metadata type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - name: spec type: - namedType: io.k8s.api.coordination.v1beta1.LeaseSpec + namedType: io.k8s.api.batch.v1.JobSpec default: {} -- name: io.k8s.api.coordination.v1beta1.LeaseSpec +- name: io.k8s.api.batch.v1.PodFailurePolicy map: fields: - - name: acquireTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - - name: holderIdentity - type: - scalar: string - - name: leaseDurationSeconds - type: - scalar: numeric - - name: 
leaseTransitions - type: - scalar: numeric - - name: renewTime + - name: rules type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime -- name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource + list: + elementType: + namedType: io.k8s.api.batch.v1.PodFailurePolicyRule + elementRelationship: atomic +- name: io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement map: fields: - - name: fsType + - name: containerName type: scalar: string - - name: partition - type: - scalar: numeric - - name: readOnly - type: - scalar: boolean - - name: volumeID + - name: operator type: scalar: string default: "" -- name: io.k8s.api.core.v1.Affinity - map: - fields: - - name: nodeAffinity - type: - namedType: io.k8s.api.core.v1.NodeAffinity - - name: podAffinity - type: - namedType: io.k8s.api.core.v1.PodAffinity - - name: podAntiAffinity + - name: values type: - namedType: io.k8s.api.core.v1.PodAntiAffinity -- name: io.k8s.api.core.v1.AttachedVolume + list: + elementType: + scalar: numeric + elementRelationship: associative +- name: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern map: fields: - - name: devicePath + - name: status type: scalar: string default: "" - - name: name + - name: type type: scalar: string default: "" -- name: io.k8s.api.core.v1.AzureDiskVolumeSource +- name: io.k8s.api.batch.v1.PodFailurePolicyRule map: fields: - - name: cachingMode - type: - scalar: string - - name: diskName - type: - scalar: string - default: "" - - name: diskURI + - name: action type: scalar: string default: "" - - name: fsType + - name: onExitCodes type: - scalar: string - - name: kind + namedType: io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement + - name: onPodConditions type: - scalar: string - - name: readOnly + list: + elementType: + namedType: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern + elementRelationship: atomic +- name: io.k8s.api.batch.v1.SuccessPolicy + map: + fields: + - name: rules type: - scalar: boolean -- name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource + list: + elementType: + namedType: io.k8s.api.batch.v1.SuccessPolicyRule + elementRelationship: atomic +- name: io.k8s.api.batch.v1.SuccessPolicyRule map: fields: - - name: readOnly + - name: succeededCount type: - scalar: boolean - - name: secretName + scalar: numeric + - name: succeededIndexes type: scalar: string - default: "" - - name: secretNamespace +- name: io.k8s.api.batch.v1.UncountedTerminatedPods + map: + fields: + - name: failed type: - scalar: string - - name: shareName + list: + elementType: + scalar: string + elementRelationship: associative + - name: succeeded type: - scalar: string - default: "" -- name: io.k8s.api.core.v1.AzureFileVolumeSource + list: + elementType: + scalar: string + elementRelationship: associative +- name: io.k8s.api.batch.v1beta1.CronJob map: fields: - - name: readOnly - type: - scalar: boolean - - name: secretName + - name: apiVersion type: scalar: string - default: "" - - name: shareName + - name: kind type: scalar: string - default: "" -- name: io.k8s.api.core.v1.CSIPersistentVolumeSource + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.batch.v1beta1.CronJobSpec + default: {} + - name: status + type: + namedType: io.k8s.api.batch.v1beta1.CronJobStatus + default: {} +- name: io.k8s.api.batch.v1beta1.CronJobSpec map: fields: - - name: controllerExpandSecretRef + - name: concurrencyPolicy type: - namedType: io.k8s.api.core.v1.SecretReference - - name: 
controllerPublishSecretRef + scalar: string + - name: failedJobsHistoryLimit type: - namedType: io.k8s.api.core.v1.SecretReference - - name: driver + scalar: numeric + - name: jobTemplate type: - scalar: string - default: "" - - name: fsType + namedType: io.k8s.api.batch.v1beta1.JobTemplateSpec + default: {} + - name: schedule type: scalar: string - - name: nodePublishSecretRef + default: "" + - name: startingDeadlineSeconds type: - namedType: io.k8s.api.core.v1.SecretReference - - name: nodeStageSecretRef + scalar: numeric + - name: successfulJobsHistoryLimit type: - namedType: io.k8s.api.core.v1.SecretReference - - name: readOnly + scalar: numeric + - name: suspend type: scalar: boolean - - name: volumeAttributes - type: - map: - elementType: - scalar: string - - name: volumeHandle + - name: timeZone type: scalar: string - default: "" -- name: io.k8s.api.core.v1.CSIVolumeSource +- name: io.k8s.api.batch.v1beta1.CronJobStatus map: fields: - - name: driver - type: - scalar: string - default: "" - - name: fsType - type: - scalar: string - - name: nodePublishSecretRef + - name: active type: - namedType: io.k8s.api.core.v1.LocalObjectReference - - name: readOnly + list: + elementType: + namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: atomic + - name: lastScheduleTime type: - scalar: boolean - - name: volumeAttributes + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastSuccessfulTime type: - map: - elementType: - scalar: string -- name: io.k8s.api.core.v1.Capabilities + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.api.batch.v1beta1.JobTemplateSpec map: fields: - - name: add + - name: metadata type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: drop + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - list: - elementType: - scalar: string - elementRelationship: atomic -- name: io.k8s.api.core.v1.CephFSPersistentVolumeSource + namedType: io.k8s.api.batch.v1.JobSpec + default: {} +- name: io.k8s.api.certificates.v1.CertificateSigningRequest map: fields: - - name: monitors - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: path + - name: apiVersion type: scalar: string - - name: readOnly - type: - scalar: boolean - - name: secretFile + - name: kind type: scalar: string - - name: secretRef + - name: metadata type: - namedType: io.k8s.api.core.v1.SecretReference - - name: user + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - scalar: string -- name: io.k8s.api.core.v1.CephFSVolumeSource + namedType: io.k8s.api.certificates.v1.CertificateSigningRequestSpec + default: {} + - name: status + type: + namedType: io.k8s.api.certificates.v1.CertificateSigningRequestStatus + default: {} +- name: io.k8s.api.certificates.v1.CertificateSigningRequestCondition map: fields: - - name: monitors - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: path + - name: lastTransitionTime type: - scalar: string - - name: readOnly + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastUpdateTime type: - scalar: boolean - - name: secretFile + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message type: scalar: string - - name: secretRef - type: - namedType: io.k8s.api.core.v1.LocalObjectReference - - name: user + - name: reason type: scalar: string -- name: io.k8s.api.core.v1.CinderPersistentVolumeSource - map: - fields: - - name: fsType + 
- name: status type: scalar: string - - name: readOnly - type: - scalar: boolean - - name: secretRef - type: - namedType: io.k8s.api.core.v1.SecretReference - - name: volumeID + default: "" + - name: type type: scalar: string default: "" -- name: io.k8s.api.core.v1.CinderVolumeSource +- name: io.k8s.api.certificates.v1.CertificateSigningRequestSpec map: fields: - - name: fsType + - name: expirationSeconds type: - scalar: string - - name: readOnly + scalar: numeric + - name: extra type: - scalar: boolean - - name: secretRef + map: + elementType: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: groups type: - namedType: io.k8s.api.core.v1.LocalObjectReference - - name: volumeID + list: + elementType: + scalar: string + elementRelationship: atomic + - name: request type: scalar: string - default: "" -- name: io.k8s.api.core.v1.ClientIPConfig - map: - fields: - - name: timeoutSeconds - type: - scalar: numeric -- name: io.k8s.api.core.v1.ComponentCondition - map: - fields: - - name: error + - name: signerName type: scalar: string - - name: message + default: "" + - name: uid type: scalar: string - - name: status + - name: usages type: - scalar: string - default: "" - - name: type + list: + elementType: + scalar: string + elementRelationship: atomic + - name: username type: scalar: string - default: "" -- name: io.k8s.api.core.v1.ComponentStatus +- name: io.k8s.api.certificates.v1.CertificateSigningRequestStatus map: fields: - - name: apiVersion + - name: certificate type: scalar: string - name: conditions type: list: elementType: - namedType: io.k8s.api.core.v1.ComponentCondition + namedType: io.k8s.api.certificates.v1.CertificateSigningRequestCondition elementRelationship: associative keys: - type - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} -- name: io.k8s.api.core.v1.ConfigMap +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundle map: fields: - name: apiVersion type: scalar: string - - name: binaryData - type: - map: - elementType: - scalar: string - - name: data - type: - map: - elementType: - scalar: string - - name: immutable - type: - scalar: boolean - name: kind type: scalar: string @@ -3754,62 +4214,653 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} -- name: io.k8s.api.core.v1.ConfigMapEnvSource + - name: spec + type: + namedType: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec + default: {} +- name: io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec map: fields: - - name: name + - name: signerName type: scalar: string - - name: optional + - name: trustBundle type: - scalar: boolean -- name: io.k8s.api.core.v1.ConfigMapKeySelector + scalar: string + default: "" +- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest map: fields: - - name: key + - name: apiVersion type: scalar: string - default: "" - - name: name + - name: kind type: scalar: string - - name: optional + - name: metadata type: - scalar: boolean - elementRelationship: atomic -- name: io.k8s.api.core.v1.ConfigMapNodeConfigSource + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec + default: {} + - name: status + type: + namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus + default: {} +- name: 
io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition map: fields: - - name: kubeletConfigKey + - name: lastTransitionTime type: - scalar: string - default: "" - - name: name + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message type: scalar: string - default: "" - - name: namespace + - name: reason type: scalar: string - default: "" - - name: resourceVersion + - name: status type: scalar: string - - name: uid + default: "" + - name: type type: scalar: string -- name: io.k8s.api.core.v1.ConfigMapProjection + default: "" +- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec map: fields: - - name: items + - name: expirationSeconds type: - list: - elementType: - namedType: io.k8s.api.core.v1.KeyToPath - elementRelationship: atomic - - name: name + scalar: numeric + - name: extra type: - scalar: string + map: + elementType: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: groups + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: request + type: + scalar: string + - name: signerName + type: + scalar: string + - name: uid + type: + scalar: string + - name: usages + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: username + type: + scalar: string +- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus + map: + fields: + - name: certificate + type: + scalar: string + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.coordination.v1.Lease + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1.LeaseSpec + default: {} +- name: io.k8s.api.coordination.v1.LeaseSpec + map: + fields: + - name: acquireTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: holderIdentity + type: + scalar: string + - name: leaseDurationSeconds + type: + scalar: numeric + - name: leaseTransitions + type: + scalar: numeric + - name: preferredHolder + type: + scalar: string + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidate + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec + default: {} +- name: io.k8s.api.coordination.v1alpha1.LeaseCandidateSpec + map: + fields: + - name: binaryVersion + type: + scalar: string + - name: emulationVersion + type: + scalar: string + - name: leaseName + type: + scalar: string + default: "" + - name: pingTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: preferredStrategies + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime +- name: io.k8s.api.coordination.v1beta1.Lease + map: + fields: + - name: apiVersion + type: + scalar: string + - 
name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1beta1.LeaseSpec + default: {} +- name: io.k8s.api.coordination.v1beta1.LeaseSpec + map: + fields: + - name: acquireTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: holderIdentity + type: + scalar: string + - name: leaseDurationSeconds + type: + scalar: numeric + - name: leaseTransitions + type: + scalar: numeric + - name: preferredHolder + type: + scalar: string + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string +- name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: partition + type: + scalar: numeric + - name: readOnly + type: + scalar: boolean + - name: volumeID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.Affinity + map: + fields: + - name: nodeAffinity + type: + namedType: io.k8s.api.core.v1.NodeAffinity + - name: podAffinity + type: + namedType: io.k8s.api.core.v1.PodAffinity + - name: podAntiAffinity + type: + namedType: io.k8s.api.core.v1.PodAntiAffinity +- name: io.k8s.api.core.v1.AppArmorProfile + map: + fields: + - name: localhostProfile + type: + scalar: string + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: localhostProfile + discriminatorValue: LocalhostProfile +- name: io.k8s.api.core.v1.AttachedVolume + map: + fields: + - name: devicePath + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.AzureDiskVolumeSource + map: + fields: + - name: cachingMode + type: + scalar: string + default: ReadWrite + - name: diskName + type: + scalar: string + default: "" + - name: diskURI + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + default: ext4 + - name: kind + type: + scalar: string + default: Shared + - name: readOnly + type: + scalar: boolean + default: false +- name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource + map: + fields: + - name: readOnly + type: + scalar: boolean + - name: secretName + type: + scalar: string + default: "" + - name: secretNamespace + type: + scalar: string + - name: shareName + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.AzureFileVolumeSource + map: + fields: + - name: readOnly + type: + scalar: boolean + - name: secretName + type: + scalar: string + default: "" + - name: shareName + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.CSIPersistentVolumeSource + map: + fields: + - name: controllerExpandSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: controllerPublishSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: nodeExpandSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: nodePublishSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: nodeStageSecretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: readOnly + type: + scalar: boolean + - name: volumeAttributes + type: + map: + elementType: + scalar: string + - name: volumeHandle + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.CSIVolumeSource + map: + fields: + - 
name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: nodePublishSecretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: readOnly + type: + scalar: boolean + - name: volumeAttributes + type: + map: + elementType: + scalar: string +- name: io.k8s.api.core.v1.Capabilities + map: + fields: + - name: add + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: drop + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.CephFSPersistentVolumeSource + map: + fields: + - name: monitors + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: path + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretFile + type: + scalar: string + - name: secretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: user + type: + scalar: string +- name: io.k8s.api.core.v1.CephFSVolumeSource + map: + fields: + - name: monitors + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: path + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretFile + type: + scalar: string + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: user + type: + scalar: string +- name: io.k8s.api.core.v1.CinderPersistentVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.SecretReference + - name: volumeID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.CinderVolumeSource + map: + fields: + - name: fsType + type: + scalar: string + - name: readOnly + type: + scalar: boolean + - name: secretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: volumeID + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.ClientIPConfig + map: + fields: + - name: timeoutSeconds + type: + scalar: numeric +- name: io.k8s.api.core.v1.ClusterTrustBundleProjection + map: + fields: + - name: labelSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: name + type: + scalar: string + - name: optional + type: + scalar: boolean + - name: path + type: + scalar: string + default: "" + - name: signerName + type: + scalar: string +- name: io.k8s.api.core.v1.ComponentCondition + map: + fields: + - name: error + type: + scalar: string + - name: message + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.ComponentStatus + map: + fields: + - name: apiVersion + type: + scalar: string + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ComponentCondition + elementRelationship: associative + keys: + - type + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} +- name: io.k8s.api.core.v1.ConfigMap + map: + fields: + - name: apiVersion + type: + scalar: string + - name: binaryData + type: + map: + elementType: + scalar: string + - name: data + type: + map: + elementType: + scalar: string + - name: immutable + type: + scalar: boolean + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} +- name: 
io.k8s.api.core.v1.ConfigMapEnvSource + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.ConfigMapKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.ConfigMapNodeConfigSource + map: + fields: + - name: kubeletConfigKey + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + default: "" + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.api.core.v1.ConfigMapProjection + map: + fields: + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: name + type: + scalar: string + default: "" - name: optional type: scalar: boolean @@ -3828,6 +4879,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -3888,10 +4940,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements default: {} + - name: restartPolicy + type: + scalar: string - name: securityContext type: namedType: io.k8s.api.core.v1.SecurityContext @@ -3964,6 +5025,17 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: TCP +- name: io.k8s.api.core.v1.ContainerResizePolicy + map: + fields: + - name: resourceName + type: + scalar: string + default: "" + - name: restartPolicy + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ContainerState map: fields: @@ -3982,7 +5054,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateTerminated map: fields: @@ -3996,7 +5067,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: finishedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4009,7 +5079,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.core.v1.ContainerStateWaiting map: fields: @@ -4022,6 +5091,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ContainerStatus map: fields: + - name: allocatedResources + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: allocatedResourcesStatus + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceStatus + elementRelationship: associative + keys: + - name - name: containerID type: scalar: string @@ -4045,6 +5127,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: boolean default: false + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements - name: restartCount type: scalar: numeric @@ -4056,6 +5141,23 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.ContainerState default: {} + - name: user + type: + namedType: io.k8s.api.core.v1.ContainerUser + - name: volumeMounts + type: + list: + elementType: + 
namedType: io.k8s.api.core.v1.VolumeMountStatus + elementRelationship: associative + keys: + - mountPath +- name: io.k8s.api.core.v1.ContainerUser + map: + fields: + - name: linux + type: + namedType: io.k8s.api.core.v1.LinuxContainerUser - name: io.k8s.api.core.v1.DaemonEndpoint map: fields: @@ -4280,10 +5382,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: readinessProbe type: namedType: io.k8s.api.core.v1.Probe + - name: resizePolicy + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ContainerResizePolicy + elementRelationship: atomic - name: resources type: namedType: io.k8s.api.core.v1.ResourceRequirements default: {} + - name: restartPolicy + type: + scalar: string - name: securityContext type: namedType: io.k8s.api.core.v1.SecurityContext @@ -4348,11 +5459,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: firstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: involvedObject type: namedType: io.k8s.api.core.v1.ObjectReference @@ -4363,7 +5472,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4404,7 +5512,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.core.v1.EventSource map: fields: @@ -4587,7 +5694,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: scheme type: scalar: string @@ -4614,6 +5720,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" +- name: io.k8s.api.core.v1.HostIP + map: + fields: + - name: ip + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.HostPathVolumeSource map: fields: @@ -4646,6 +5760,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -4688,6 +5803,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -4708,6 +5824,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ImageVolumeSource + map: + fields: + - name: pullPolicy + type: + scalar: string + - name: reference + type: + scalar: string - name: io.k8s.api.core.v1.KeyToPath map: fields: @@ -4740,6 +5865,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: httpGet type: namedType: io.k8s.api.core.v1.HTTPGetAction + - name: sleep + type: + namedType: io.k8s.api.core.v1.SleepAction - name: tcpSocket type: namedType: io.k8s.api.core.v1.TCPSocketAction @@ -4801,6 +5929,23 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.LimitRangeItem elementRelationship: atomic +- name: io.k8s.api.core.v1.LinuxContainerUser + map: + fields: + - name: gid + type: + scalar: numeric + default: 0 + - name: supplementalGroups + type: + list: + elementType: + scalar: numeric + elementRelationship: atomic + - name: uid + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.LoadBalancerIngress map: fields: @@ -4810,6 +5955,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + - name: ipMode + type: + scalar: string - name: ports type: list: @@ -4831,6 +5979,7 
@@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" elementRelationship: atomic - name: io.k8s.api.core.v1.LocalVolumeSource map: @@ -4842,6 +5991,16 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ModifyVolumeStatus + map: + fields: + - name: status + type: + scalar: string + default: "" + - name: targetVolumeAttributesClassName + type: + scalar: string - name: io.k8s.api.core.v1.NFSVolumeSource map: fields: @@ -4883,7 +6042,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -4971,11 +6129,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastHeartbeatTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5018,6 +6174,31 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.DaemonEndpoint default: {} +- name: io.k8s.api.core.v1.NodeFeatures + map: + fields: + - name: supplementalGroupsPolicy + type: + scalar: boolean +- name: io.k8s.api.core.v1.NodeRuntimeHandler + map: + fields: + - name: features + type: + namedType: io.k8s.api.core.v1.NodeRuntimeHandlerFeatures + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.NodeRuntimeHandlerFeatures + map: + fields: + - name: recursiveReadOnlyMounts + type: + scalar: boolean + - name: userNamespaces + type: + scalar: boolean - name: io.k8s.api.core.v1.NodeSelector map: fields: @@ -5127,6 +6308,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.NodeDaemonEndpoints default: {} + - name: features + type: + namedType: io.k8s.api.core.v1.NodeFeatures - name: images type: list: @@ -5140,6 +6324,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: phase type: scalar: string + - name: runtimeHandlers + type: + list: + elementType: + namedType: io.k8s.api.core.v1.NodeRuntimeHandler + elementRelationship: atomic - name: volumesAttached type: list: @@ -5279,11 +6469,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5312,10 +6500,10 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.api.core.v1.TypedLocalObjectReference - name: dataSourceRef type: - namedType: io.k8s.api.core.v1.TypedLocalObjectReference + namedType: io.k8s.api.core.v1.TypedObjectReference - name: resources type: - namedType: io.k8s.api.core.v1.ResourceRequirements + namedType: io.k8s.api.core.v1.VolumeResourceRequirements default: {} - name: selector type: @@ -5323,6 +6511,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageClassName type: scalar: string + - name: volumeAttributesClassName + type: + scalar: string - name: volumeMode type: scalar: string @@ -5338,6 +6529,12 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: allocatedResourceStatuses + type: + map: + elementType: + scalar: string + elementRelationship: separable - name: allocatedResources type: map: @@ -5356,10 +6553,13 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type - - name: phase + - 
name: currentVolumeAttributesClassName type: scalar: string - - name: resizeStatus + - name: modifyVolumeStatus + type: + namedType: io.k8s.api.core.v1.ModifyVolumeStatus + - name: phase type: scalar: string - name: io.k8s.api.core.v1.PersistentVolumeClaimTemplate @@ -5415,6 +6615,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: claimRef type: namedType: io.k8s.api.core.v1.ObjectReference + elementRelationship: separable - name: csi type: namedType: io.k8s.api.core.v1.CSIPersistentVolumeSource @@ -5478,6 +6679,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageos type: namedType: io.k8s.api.core.v1.StorageOSPersistentVolumeSource + - name: volumeAttributesClassName + type: + scalar: string - name: volumeMode type: scalar: string @@ -5487,6 +6691,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.PersistentVolumeStatus map: fields: + - name: lastPhaseTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - name: message type: scalar: string @@ -5548,6 +6755,18 @@ var schemaYAML = typed.YAMLObject(`types: - name: labelSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: mismatchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: namespaceSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector @@ -5582,11 +6801,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -5637,6 +6854,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.PodOS map: fields: @@ -5651,9 +6869,42 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.PodResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resourceClaimName + type: + scalar: string + - name: resourceClaimTemplateName + type: + scalar: string +- name: io.k8s.api.core.v1.PodResourceClaimStatus + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resourceClaimName + type: + scalar: string +- name: io.k8s.api.core.v1.PodSchedulingGate + map: + fields: + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.PodSecurityContext map: fields: + - name: appArmorProfile + type: + namedType: io.k8s.api.core.v1.AppArmorProfile - name: fsGroup type: scalar: numeric @@ -5681,6 +6932,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: numeric elementRelationship: atomic + - name: supplementalGroupsPolicy + type: + scalar: string - name: sysctls type: list: @@ -5744,6 +6998,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostPID type: scalar: boolean + - name: hostUsers + type: + scalar: boolean - name: hostname type: scalar: string @@ -5795,6 +7052,14 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.PodReadinessGate elementRelationship: atomic + - name: resourceClaims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodResourceClaim + elementRelationship: associative + keys: + - name - name: restartPolicy type: scalar: string @@ -5804,6 +7069,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: 
schedulerName type: scalar: string + - name: schedulingGates + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodSchedulingGate + elementRelationship: associative + keys: + - name - name: securityContext type: namedType: io.k8s.api.core.v1.PodSecurityContext @@ -5874,6 +7147,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostIP type: scalar: string + - name: hostIPs + type: + list: + elementType: + namedType: io.k8s.api.core.v1.HostIP + elementRelationship: atomic - name: initContainerStatuses type: list: @@ -5906,6 +7185,17 @@ var schemaYAML = typed.YAMLObject(`types: - name: reason type: scalar: string + - name: resize + type: + scalar: string + - name: resourceClaimStatuses + type: + list: + elementType: + namedType: io.k8s.api.core.v1.PodResourceClaimStatus + elementRelationship: associative + keys: + - name - name: startTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time @@ -6056,6 +7346,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -6065,6 +7356,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -6074,6 +7366,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: io.k8s.api.core.v1.RBDVolumeSource map: fields: @@ -6087,6 +7380,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -6096,6 +7390,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -6105,6 +7400,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: io.k8s.api.core.v1.ReplicationController map: fields: @@ -6132,7 +7428,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -6192,6 +7487,16 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 +- name: io.k8s.api.core.v1.ResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: request + type: + scalar: string - name: io.k8s.api.core.v1.ResourceFieldSelector map: fields: @@ -6201,12 +7506,21 @@ var schemaYAML = typed.YAMLObject(`types: - name: divisor type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - default: {} - name: resource type: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceHealth + map: + fields: + - name: health + type: + scalar: string + - name: resourceID + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ResourceQuota map: fields: @@ -6261,6 +7575,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ResourceRequirements map: fields: + - name: claims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceClaim + elementRelationship: associative + keys: + - name - name: limits type: map: @@ -6271,6 +7593,21 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.ResourceStatus + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resources + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceHealth + elementRelationship: associative + 
keys: + - resourceID - name: io.k8s.api.core.v1.SELinuxOptions map: fields: @@ -6292,6 +7629,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -6311,6 +7649,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -6327,6 +7666,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -6346,6 +7686,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -6433,6 +7774,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -6446,6 +7788,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -6462,6 +7805,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string + default: "" - name: optional type: scalar: boolean @@ -6499,6 +7843,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: allowPrivilegeEscalation type: scalar: boolean + - name: appArmorProfile + type: + namedType: io.k8s.api.core.v1.AppArmorProfile - name: capabilities type: namedType: io.k8s.api.core.v1.Capabilities @@ -6616,7 +7963,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetPort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.ServiceSpec map: fields: @@ -6695,6 +8041,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: sessionAffinityConfig type: namedType: io.k8s.api.core.v1.SessionAffinityConfig + - name: trafficDistribution + type: + scalar: string - name: type type: scalar: string @@ -6719,6 +8068,13 @@ var schemaYAML = typed.YAMLObject(`types: - name: clientIP type: namedType: io.k8s.api.core.v1.ClientIPConfig +- name: io.k8s.api.core.v1.SleepAction + map: + fields: + - name: seconds + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.StorageOSPersistentVolumeSource map: fields: @@ -6775,7 +8131,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.core.v1.Taint map: fields: @@ -6840,10 +8195,25 @@ var schemaYAML = typed.YAMLObject(`types: - name: labelSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: matchLabelKeys + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: maxSkew type: scalar: numeric default: 0 + - name: minDomains + type: + scalar: numeric + - name: nodeAffinityPolicy + type: + scalar: string + - name: nodeTaintsPolicy + type: + scalar: string - name: topologyKey type: scalar: string @@ -6867,6 +8237,23 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.TypedObjectReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string - name: io.k8s.api.core.v1.Volume map: fields: @@ -6921,6 +8308,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostPath type: namedType: io.k8s.api.core.v1.HostPathVolumeSource + - name: image + type: + namedType: 
io.k8s.api.core.v1.ImageVolumeSource - name: iscsi type: namedType: io.k8s.api.core.v1.ISCSIVolumeSource @@ -6971,17 +8361,40 @@ var schemaYAML = typed.YAMLObject(`types: - name: name type: scalar: string - default: "" -- name: io.k8s.api.core.v1.VolumeMount + default: "" +- name: io.k8s.api.core.v1.VolumeMount + map: + fields: + - name: mountPath + type: + scalar: string + default: "" + - name: mountPropagation + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: readOnly + type: + scalar: boolean + - name: recursiveReadOnly + type: + scalar: string + - name: subPath + type: + scalar: string + - name: subPathExpr + type: + scalar: string +- name: io.k8s.api.core.v1.VolumeMountStatus map: fields: - name: mountPath type: scalar: string default: "" - - name: mountPropagation - type: - scalar: string - name: name type: scalar: string @@ -6989,10 +8402,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: readOnly type: scalar: boolean - - name: subPath - type: - scalar: string - - name: subPathExpr + - name: recursiveReadOnly type: scalar: string - name: io.k8s.api.core.v1.VolumeNodeAffinity @@ -7004,6 +8414,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.VolumeProjection map: fields: + - name: clusterTrustBundle + type: + namedType: io.k8s.api.core.v1.ClusterTrustBundleProjection - name: configMap type: namedType: io.k8s.api.core.v1.ConfigMapProjection @@ -7016,6 +8429,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: serviceAccountToken type: namedType: io.k8s.api.core.v1.ServiceAccountTokenProjection +- name: io.k8s.api.core.v1.VolumeResourceRequirements + map: + fields: + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource map: fields: @@ -7281,11 +8707,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -7293,7 +8717,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: kind type: scalar: string @@ -7336,7 +8759,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - default: {} - name: io.k8s.api.events.v1beta1.Event map: fields: @@ -7345,93 +8767,333 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string - name: apiVersion type: - scalar: string - - name: deprecatedCount + scalar: string + - name: deprecatedCount + type: + scalar: numeric + - name: deprecatedFirstTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deprecatedLastTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deprecatedSource + type: + namedType: io.k8s.api.core.v1.EventSource + default: {} + - name: eventTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: note + type: + scalar: string + - name: reason + type: + scalar: string + - name: regarding 
+ type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: related + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: reportingController + type: + scalar: string + - name: reportingInstance + type: + scalar: string + - name: series + type: + namedType: io.k8s.api.events.v1beta1.EventSeries + - name: type + type: + scalar: string +- name: io.k8s.api.events.v1beta1.EventSeries + map: + fields: + - name: count + type: + scalar: numeric + default: 0 + - name: lastObservedTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime +- name: io.k8s.api.extensions.v1beta1.DaemonSet + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.extensions.v1beta1.DaemonSetSpec + default: {} + - name: status + type: + namedType: io.k8s.api.extensions.v1beta1.DaemonSetStatus + default: {} +- name: io.k8s.api.extensions.v1beta1.DaemonSetCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.extensions.v1beta1.DaemonSetSpec + map: + fields: + - name: minReadySeconds + type: + scalar: numeric + - name: revisionHistoryLimit + type: + scalar: numeric + - name: selector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: template + type: + namedType: io.k8s.api.core.v1.PodTemplateSpec + default: {} + - name: templateGeneration + type: + scalar: numeric + - name: updateStrategy + type: + namedType: io.k8s.api.extensions.v1beta1.DaemonSetUpdateStrategy + default: {} +- name: io.k8s.api.extensions.v1beta1.DaemonSetStatus + map: + fields: + - name: collisionCount + type: + scalar: numeric + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.DaemonSetCondition + elementRelationship: associative + keys: + - type + - name: currentNumberScheduled + type: + scalar: numeric + default: 0 + - name: desiredNumberScheduled + type: + scalar: numeric + default: 0 + - name: numberAvailable + type: + scalar: numeric + - name: numberMisscheduled + type: + scalar: numeric + default: 0 + - name: numberReady + type: + scalar: numeric + default: 0 + - name: numberUnavailable + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: updatedNumberScheduled + type: + scalar: numeric +- name: io.k8s.api.extensions.v1beta1.DaemonSetUpdateStrategy + map: + fields: + - name: rollingUpdate + type: + namedType: io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet + - name: type + type: + scalar: string +- name: io.k8s.api.extensions.v1beta1.Deployment + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.extensions.v1beta1.DeploymentSpec + default: {} + - name: status + type: + namedType: io.k8s.api.extensions.v1beta1.DeploymentStatus + default: {} +- name: io.k8s.api.extensions.v1beta1.DeploymentCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: 
lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.extensions.v1beta1.DeploymentSpec + map: + fields: + - name: minReadySeconds + type: + scalar: numeric + - name: paused + type: + scalar: boolean + - name: progressDeadlineSeconds + type: + scalar: numeric + - name: replicas + type: + scalar: numeric + - name: revisionHistoryLimit type: scalar: numeric - - name: deprecatedFirstTimestamp + - name: rollbackTo type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: deprecatedLastTimestamp + namedType: io.k8s.api.extensions.v1beta1.RollbackConfig + - name: selector type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: deprecatedSource + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: strategy type: - namedType: io.k8s.api.core.v1.EventSource + namedType: io.k8s.api.extensions.v1beta1.DeploymentStrategy default: {} - - name: eventTime + - name: template type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} - - name: kind +- name: io.k8s.api.extensions.v1beta1.DeploymentStatus + map: + fields: + - name: availableReplicas type: - scalar: string - - name: metadata + scalar: numeric + - name: collisionCount type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: note + scalar: numeric + - name: conditions type: - scalar: string - - name: reason + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.DeploymentCondition + elementRelationship: associative + keys: + - type + - name: observedGeneration type: - scalar: string - - name: regarding + scalar: numeric + - name: readyReplicas type: - namedType: io.k8s.api.core.v1.ObjectReference - default: {} - - name: related + scalar: numeric + - name: replicas type: - namedType: io.k8s.api.core.v1.ObjectReference - - name: reportingController + scalar: numeric + - name: unavailableReplicas type: - scalar: string - - name: reportingInstance + scalar: numeric + - name: updatedReplicas type: - scalar: string - - name: series + scalar: numeric +- name: io.k8s.api.extensions.v1beta1.DeploymentStrategy + map: + fields: + - name: rollingUpdate type: - namedType: io.k8s.api.events.v1beta1.EventSeries + namedType: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment - name: type type: scalar: string -- name: io.k8s.api.events.v1beta1.EventSeries +- name: io.k8s.api.extensions.v1beta1.HTTPIngressPath map: fields: - - name: count - type: - scalar: numeric - default: 0 - - name: lastObservedTime + - name: backend type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + namedType: io.k8s.api.extensions.v1beta1.IngressBackend default: {} -- name: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - map: - fields: - - name: name + - name: path type: scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedFlexVolume + - name: pathType + type: + scalar: string +- name: io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue map: fields: - - name: driver + - name: paths type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.AllowedHostPath + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.HTTPIngressPath + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.IPBlock 
map: fields: - - name: pathPrefix + - name: cidr type: scalar: string - - name: readOnly + default: "" + - name: except type: - scalar: boolean -- name: io.k8s.api.extensions.v1beta1.DaemonSet + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.Ingress map: fields: - name: apiVersion @@ -7446,108 +9108,208 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.DaemonSetSpec + namedType: io.k8s.api.extensions.v1beta1.IngressSpec default: {} - name: status type: - namedType: io.k8s.api.extensions.v1beta1.DaemonSetStatus + namedType: io.k8s.api.extensions.v1beta1.IngressStatus default: {} -- name: io.k8s.api.extensions.v1beta1.DaemonSetCondition +- name: io.k8s.api.extensions.v1beta1.IngressBackend map: fields: - - name: lastTransitionTime + - name: resource type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: message + namedType: io.k8s.api.core.v1.TypedLocalObjectReference + - name: serviceName type: scalar: string - - name: reason + - name: servicePort + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress + map: + fields: + - name: hostname type: scalar: string - - name: status + - name: ip type: scalar: string - default: "" - - name: type + - name: ports + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IngressPortStatus + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerStatus + map: + fields: + - name: ingress + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.IngressPortStatus + map: + fields: + - name: error + type: + scalar: string + - name: port + type: + scalar: numeric + default: 0 + - name: protocol type: scalar: string default: "" -- name: io.k8s.api.extensions.v1beta1.DaemonSetSpec +- name: io.k8s.api.extensions.v1beta1.IngressRule map: fields: - - name: minReadySeconds + - name: host type: - scalar: numeric - - name: revisionHistoryLimit + scalar: string + - name: http type: - scalar: numeric - - name: selector + namedType: io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue +- name: io.k8s.api.extensions.v1beta1.IngressSpec + map: + fields: + - name: backend type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: template + namedType: io.k8s.api.extensions.v1beta1.IngressBackend + - name: ingressClassName type: - namedType: io.k8s.api.core.v1.PodTemplateSpec + scalar: string + - name: rules + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IngressRule + elementRelationship: atomic + - name: tls + type: + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.IngressTLS + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.IngressStatus + map: + fields: + - name: loadBalancer + type: + namedType: io.k8s.api.extensions.v1beta1.IngressLoadBalancerStatus default: {} - - name: templateGeneration +- name: io.k8s.api.extensions.v1beta1.IngressTLS + map: + fields: + - name: hosts type: - scalar: numeric - - name: updateStrategy + list: + elementType: + scalar: string + elementRelationship: atomic + - name: secretName type: - namedType: io.k8s.api.extensions.v1beta1.DaemonSetUpdateStrategy + scalar: string +- name: io.k8s.api.extensions.v1beta1.NetworkPolicy + map: + fields: + - name: apiVersion + type: + 
scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} -- name: io.k8s.api.extensions.v1beta1.DaemonSetStatus + - name: spec + type: + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicySpec + default: {} +- name: io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule map: fields: - - name: collisionCount + - name: ports type: - scalar: numeric - - name: conditions + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPort + elementRelationship: atomic + - name: to type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.DaemonSetCondition - elementRelationship: associative - keys: - - type - - name: currentNumberScheduled + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPeer + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule + map: + fields: + - name: from type: - scalar: numeric - default: 0 - - name: desiredNumberScheduled + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPeer + elementRelationship: atomic + - name: ports type: - scalar: numeric - default: 0 - - name: numberAvailable + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPort + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.NetworkPolicyPeer + map: + fields: + - name: ipBlock type: - scalar: numeric - - name: numberMisscheduled + namedType: io.k8s.api.extensions.v1beta1.IPBlock + - name: namespaceSelector type: - scalar: numeric - default: 0 - - name: numberReady + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: podSelector type: - scalar: numeric - default: 0 - - name: numberUnavailable + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector +- name: io.k8s.api.extensions.v1beta1.NetworkPolicyPort + map: + fields: + - name: endPort type: scalar: numeric - - name: observedGeneration + - name: port type: - scalar: numeric - - name: updatedNumberScheduled + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: protocol type: - scalar: numeric -- name: io.k8s.api.extensions.v1beta1.DaemonSetUpdateStrategy + scalar: string +- name: io.k8s.api.extensions.v1beta1.NetworkPolicySpec map: fields: - - name: rollingUpdate + - name: egress type: - namedType: io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet - - name: type + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule + elementRelationship: atomic + - name: ingress type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.Deployment + list: + elementType: + namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule + elementRelationship: atomic + - name: podSelector + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + default: {} + - name: policyTypes + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.extensions.v1beta1.ReplicaSet map: fields: - name: apiVersion @@ -7562,23 +9324,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.DeploymentSpec + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetSpec default: {} - name: status type: - namedType: io.k8s.api.extensions.v1beta1.DeploymentStatus + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetStatus default: {} -- name: io.k8s.api.extensions.v1beta1.DeploymentCondition +- name: io.k8s.api.extensions.v1beta1.ReplicaSetCondition map: 
fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - - name: lastUpdateTime - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -7593,55 +9350,39 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.extensions.v1beta1.DeploymentSpec +- name: io.k8s.api.extensions.v1beta1.ReplicaSetSpec map: fields: - name: minReadySeconds type: scalar: numeric - - name: paused - type: - scalar: boolean - - name: progressDeadlineSeconds - type: - scalar: numeric - name: replicas type: scalar: numeric - - name: revisionHistoryLimit - type: - scalar: numeric - - name: rollbackTo - type: - namedType: io.k8s.api.extensions.v1beta1.RollbackConfig - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: strategy - type: - namedType: io.k8s.api.extensions.v1beta1.DeploymentStrategy - default: {} - name: template type: namedType: io.k8s.api.core.v1.PodTemplateSpec default: {} -- name: io.k8s.api.extensions.v1beta1.DeploymentStatus +- name: io.k8s.api.extensions.v1beta1.ReplicaSetStatus map: fields: - name: availableReplicas type: scalar: numeric - - name: collisionCount - type: - scalar: numeric - name: conditions type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.DeploymentCondition + namedType: io.k8s.api.extensions.v1beta1.ReplicaSetCondition elementRelationship: associative keys: - type + - name: fullyLabeledReplicas + type: + scalar: numeric - name: observedGeneration type: scalar: numeric @@ -7651,174 +9392,48 @@ var schemaYAML = typed.YAMLObject(`types: - name: replicas type: scalar: numeric - - name: unavailableReplicas - type: - scalar: numeric - - name: updatedReplicas - type: - scalar: numeric -- name: io.k8s.api.extensions.v1beta1.DeploymentStrategy - map: - fields: - - name: rollingUpdate - type: - namedType: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment - - name: type - type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.HTTPIngressPath - map: - fields: - - name: backend - type: - namedType: io.k8s.api.extensions.v1beta1.IngressBackend - default: {} - - name: path - type: - scalar: string - - name: pathType - type: - scalar: string -- name: io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue - map: - fields: - - name: paths - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.HTTPIngressPath - elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric default: 0 -- name: io.k8s.api.extensions.v1beta1.IDRange +- name: io.k8s.api.extensions.v1beta1.RollbackConfig map: fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min + - name: revision type: scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.IPBlock - map: - fields: - - name: cidr - type: - scalar: string - default: "" - - name: except - type: - list: - elementType: - scalar: string - elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.Ingress - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - 
type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.extensions.v1beta1.IngressSpec - default: {} - - name: status - type: - namedType: io.k8s.api.extensions.v1beta1.IngressStatus - default: {} -- name: io.k8s.api.extensions.v1beta1.IngressBackend +- name: io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet map: fields: - - name: resource - type: - namedType: io.k8s.api.core.v1.TypedLocalObjectReference - - name: serviceName - type: - scalar: string - - name: servicePort + - name: maxSurge type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} -- name: io.k8s.api.extensions.v1beta1.IngressRule - map: - fields: - - name: host - type: - scalar: string - - name: http + - name: maxUnavailable type: - namedType: io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue -- name: io.k8s.api.extensions.v1beta1.IngressSpec + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment map: fields: - - name: backend - type: - namedType: io.k8s.api.extensions.v1beta1.IngressBackend - - name: ingressClassName - type: - scalar: string - - name: rules - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IngressRule - elementRelationship: atomic - - name: tls + - name: maxSurge type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IngressTLS - elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.IngressStatus - map: - fields: - - name: loadBalancer + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + - name: maxUnavailable type: - namedType: io.k8s.api.core.v1.LoadBalancerStatus - default: {} -- name: io.k8s.api.extensions.v1beta1.IngressTLS + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration map: fields: - - name: hosts + - name: lendablePercent type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: secretName + scalar: numeric + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod + map: + fields: + - name: type type: scalar: string -- name: io.k8s.api.extensions.v1beta1.NetworkPolicy + default: "" +- name: io.k8s.api.flowcontrol.v1.FlowSchema map: fields: - name: apiVersion @@ -7833,217 +9448,136 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicySpec + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaSpec default: {} -- name: io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule - map: - fields: - - name: ports - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPort - elementRelationship: atomic - - name: to - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPeer - elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule - map: - fields: - - name: from - type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPeer - elementRelationship: atomic - - name: ports + - name: status type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyPort - elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.NetworkPolicyPeer + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaStatus + default: {} +- name: io.k8s.api.flowcontrol.v1.FlowSchemaCondition map: fields: - - name: ipBlock - 
type: - namedType: io.k8s.api.extensions.v1beta1.IPBlock - - name: namespaceSelector + - name: lastTransitionTime type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: podSelector + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector -- name: io.k8s.api.extensions.v1beta1.NetworkPolicyPort - map: - fields: - - name: endPort + scalar: string + - name: reason type: - scalar: numeric - - name: port + scalar: string + - name: status type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: protocol + scalar: string + - name: type type: scalar: string -- name: io.k8s.api.extensions.v1beta1.NetworkPolicySpec +- name: io.k8s.api.flowcontrol.v1.FlowSchemaSpec map: fields: - - name: egress + - name: distinguisherMethod type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule - elementRelationship: atomic - - name: ingress + namedType: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod + - name: matchingPrecedence type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule - elementRelationship: atomic - - name: podSelector + scalar: numeric + default: 0 + - name: priorityLevelConfiguration type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference default: {} - - name: policyTypes + - name: rules type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicy +- name: io.k8s.api.flowcontrol.v1.FlowSchemaStatus map: fields: - - name: apiVersion + - name: conditions type: - scalar: string - - name: kind + list: + elementType: + namedType: io.k8s.api.flowcontrol.v1.FlowSchemaCondition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.flowcontrol.v1.GroupSubject + map: + fields: + - name: name type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec + default: "" +- name: io.k8s.api.flowcontrol.v1.LimitResponse map: fields: - - name: allowPrivilegeEscalation - type: - scalar: boolean - - name: allowedCSIDrivers + - name: queuing type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedCSIDriver - elementRelationship: atomic - - name: allowedCapabilities + namedType: io.k8s.api.flowcontrol.v1.QueuingConfiguration + - name: type type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: allowedFlexVolumes + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: queuing + discriminatorValue: Queuing +- name: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration + map: + fields: + - name: borrowingLimitPercent type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedFlexVolume - elementRelationship: atomic - - name: allowedHostPaths + scalar: numeric + - name: lendablePercent type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.AllowedHostPath - elementRelationship: atomic - - name: allowedProcMountTypes + scalar: numeric + - name: limitResponse type: - list: - elementType: - scalar: string - elementRelationship: atomic 
- - name: allowedUnsafeSysctls + namedType: io.k8s.api.flowcontrol.v1.LimitResponse + default: {} + - name: nominalConcurrencyShares type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultAddCapabilities + scalar: numeric +- name: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule + map: + fields: + - name: nonResourceURLs type: list: elementType: scalar: string - elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation - type: - scalar: boolean - - name: forbiddenSysctls + elementRelationship: associative + - name: verbs type: list: elementType: scalar: string - elementRelationship: atomic - - name: fsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC - type: - scalar: boolean - - name: hostNetwork - type: - scalar: boolean - - name: hostPID - type: - scalar: boolean - - name: hostPorts + elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects + map: + fields: + - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.HostPortRange + namedType: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule elementRelationship: atomic - - name: privileged - type: - scalar: boolean - - name: readOnlyRootFilesystem - type: - scalar: boolean - - name: requiredDropCapabilities + - name: resourceRules type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1.ResourcePolicyRule elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser - type: - namedType: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass - type: - namedType: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions - - name: seLinux - type: - namedType: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions - default: {} - - name: supplementalGroups - type: - namedType: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions - default: {} - - name: volumes + - name: subjects type: list: elementType: - scalar: string + namedType: io.k8s.api.flowcontrol.v1.Subject elementRelationship: atomic -- name: io.k8s.api.extensions.v1beta1.ReplicaSet +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8058,19 +9592,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetSpec + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.extensions.v1beta1.ReplicaSetCondition +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8080,146 +9613,152 @@ var schemaYAML = typed.YAMLObject(`types: - name: status type: scalar: string - default: "" - name: type type: scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.ReplicaSetSpec +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference map: fields: - - name: minReadySeconds + - name: name type: - scalar: numeric - - name: replicas + scalar: string + default: "" +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec + map: + fields: + - name: exempt 
type: - scalar: numeric - - name: selector + namedType: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration + - name: limited type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: template + namedType: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration + - name: type type: - namedType: io.k8s.api.core.v1.PodTemplateSpec - default: {} -- name: io.k8s.api.extensions.v1beta1.ReplicaSetStatus + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: exempt + discriminatorValue: Exempt + - fieldName: limited + discriminatorValue: Limited +- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus map: fields: - - name: availableReplicas - type: - scalar: numeric - name: conditions type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.ReplicaSetCondition + namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type - - name: fullyLabeledReplicas - type: - scalar: numeric - - name: observedGeneration - type: - scalar: numeric - - name: readyReplicas - type: - scalar: numeric - - name: replicas - type: - scalar: numeric - default: 0 -- name: io.k8s.api.extensions.v1beta1.RollbackConfig +- name: io.k8s.api.flowcontrol.v1.QueuingConfiguration map: fields: - - name: revision + - name: handSize type: scalar: numeric -- name: io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet - map: - fields: - - name: maxSurge + default: 0 + - name: queueLengthLimit type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + scalar: numeric + default: 0 + - name: queues type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RollingUpdateDeployment + scalar: numeric + default: 0 +- name: io.k8s.api.flowcontrol.v1.ResourcePolicyRule map: fields: - - name: maxSurge + - name: apiGroups type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - - name: maxUnavailable + list: + elementType: + scalar: string + elementRelationship: associative + - name: clusterScope type: - namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.extensions.v1beta1.RunAsGroupStrategyOptions - map: - fields: - - name: ranges + scalar: boolean + - name: namespaces type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: string + elementRelationship: associative + - name: resources type: - scalar: string - default: "" -- name: io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges + list: + elementType: + scalar: string + elementRelationship: associative + - name: verbs type: list: elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: string + elementRelationship: associative +- name: io.k8s.api.flowcontrol.v1.ServiceAccountSubject + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: namespace type: scalar: string default: "" -- name: io.k8s.api.extensions.v1beta1.RuntimeClassStrategyOptions +- name: io.k8s.api.flowcontrol.v1.Subject map: fields: - - name: allowedRuntimeClassNames + - name: group type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: defaultRuntimeClassName + namedType: io.k8s.api.flowcontrol.v1.GroupSubject + - name: kind type: scalar: string -- name: io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions + default: "" + - name: 
serviceAccount + type: + namedType: io.k8s.api.flowcontrol.v1.ServiceAccountSubject + - name: user + type: + namedType: io.k8s.api.flowcontrol.v1.UserSubject + unions: + - discriminator: kind + fields: + - fieldName: group + discriminatorValue: Group + - fieldName: serviceAccount + discriminatorValue: ServiceAccount + - fieldName: user + discriminatorValue: User +- name: io.k8s.api.flowcontrol.v1.UserSubject map: fields: - - name: rule + - name: name type: scalar: string default: "" - - name: seLinuxOptions - type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions +- name: io.k8s.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration map: fields: - - name: ranges + - name: lendablePercent type: - list: - elementType: - namedType: io.k8s.api.extensions.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: numeric + - name: nominalConcurrencyShares type: - scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + scalar: numeric +- name: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchema map: fields: - name: apiVersion @@ -8234,19 +9773,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8259,50 +9797,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta1.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta1.LimitResponse map: fields: - name: queuing type: - namedType: 
io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration - name: type type: scalar: string @@ -8312,18 +9850,24 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration map: fields: - name: assuredConcurrencyShares type: scalar: numeric default: 0 + - name: borrowingLimitPercent + type: + scalar: numeric + - name: lendablePercent + type: + scalar: numeric - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta1.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -8338,28 +9882,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.Subject + namedType: io.k8s.api.flowcontrol.v1beta1.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8374,19 +9918,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8399,19 +9942,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec map: fields: + - name: exempt + type: + namedType: io.k8s.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -8419,20 +9965,22 @@ var schemaYAML = 
typed.YAMLObject(`types: unions: - discriminator: type fields: + - fieldName: exempt + discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration map: fields: - name: handSize @@ -8447,7 +9995,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule map: fields: - name: apiGroups @@ -8477,7 +10025,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject map: fields: - name: name @@ -8488,22 +10036,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1alpha1.Subject +- name: io.k8s.api.flowcontrol.v1beta1.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta1.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta1.UserSubject unions: - discriminator: kind fields: @@ -8513,21 +10061,30 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject +- name: io.k8s.api.flowcontrol.v1beta1.UserSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration + map: + fields: + - name: lendablePercent + type: + scalar: numeric + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchema map: fields: - name: apiVersion @@ -8542,19 +10099,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8567,50 +10123,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: 
string -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta1.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta2.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta2.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration - name: type type: scalar: string @@ -8620,18 +10176,24 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration map: fields: - name: assuredConcurrencyShares type: scalar: numeric default: 0 + - name: borrowingLimitPercent + type: + scalar: numeric + - name: lendablePercent + type: + scalar: numeric - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1beta1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta2.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -8646,28 +10208,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.Subject + namedType: io.k8s.api.flowcontrol.v1beta2.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8682,19 +10244,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: 
io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8707,19 +10268,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec map: fields: + - name: exempt + type: + namedType: io.k8s.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -8727,20 +10291,22 @@ var schemaYAML = typed.YAMLObject(`types: unions: - discriminator: type fields: + - fieldName: exempt + discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration map: fields: - name: handSize @@ -8755,7 +10321,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule map: fields: - name: apiGroups @@ -8785,7 +10351,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject map: fields: - name: name @@ -8796,22 +10362,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta1.Subject +- name: io.k8s.api.flowcontrol.v1beta2.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1beta1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta2.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1beta1.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta2.UserSubject unions: - discriminator: kind fields: @@ -8821,21 +10387,30 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user 
discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1beta1.UserSubject +- name: io.k8s.api.flowcontrol.v1beta2.UserSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration + map: + fields: + - name: lendablePercent + type: + scalar: numeric + - name: nominalConcurrencyShares + type: + scalar: numeric +- name: io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchema +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchema map: fields: - name: apiVersion @@ -8850,19 +10425,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -8875,50 +10449,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec map: fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1beta2.GroupSubject +- name: io.k8s.api.flowcontrol.v1beta3.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.LimitResponse +- name: io.k8s.api.flowcontrol.v1beta3.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration - name: type type: scalar: string @@ -8928,18 +10502,24 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration map: fields: - - name: assuredConcurrencyShares + - name: borrowingLimitPercent + type: + scalar: numeric + - name: lendablePercent type: scalar: numeric - default: 0 - name: limitResponse type: - namedType: 
io.k8s.api.flowcontrol.v1beta2.LimitResponse + namedType: io.k8s.api.flowcontrol.v1beta3.LimitResponse default: {} -- name: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule + - name: nominalConcurrencyShares + type: + scalar: numeric + default: 0 +- name: io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -8954,28 +10534,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.Subject + namedType: io.k8s.api.flowcontrol.v1beta3.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -8990,19 +10570,18 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -9015,19 +10594,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec map: fields: + - name: exempt + type: + namedType: io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -9035,20 +10617,22 @@ var schemaYAML = typed.YAMLObject(`types: unions: - discriminator: type fields: + - fieldName: exempt + discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: 
io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration map: fields: - name: handSize @@ -9063,7 +10647,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule map: fields: - name: apiGroups @@ -9093,7 +10677,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject map: fields: - name: name @@ -9104,22 +10688,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1beta2.Subject +- name: io.k8s.api.flowcontrol.v1beta3.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1beta2.GroupSubject + namedType: io.k8s.api.flowcontrol.v1beta3.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1beta2.UserSubject + namedType: io.k8s.api.flowcontrol.v1beta3.UserSubject unions: - discriminator: kind fields: @@ -9129,7 +10713,7 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1beta2.UserSubject +- name: io.k8s.api.flowcontrol.v1beta3.UserSubject map: fields: - name: name @@ -9306,6 +10890,44 @@ var schemaYAML = typed.YAMLObject(`types: - name: parameters type: namedType: io.k8s.api.networking.v1.IngressClassParametersReference +- name: io.k8s.api.networking.v1.IngressLoadBalancerIngress + map: + fields: + - name: hostname + type: + scalar: string + - name: ip + type: + scalar: string + - name: ports + type: + list: + elementType: + namedType: io.k8s.api.networking.v1.IngressPortStatus + elementRelationship: atomic +- name: io.k8s.api.networking.v1.IngressLoadBalancerStatus + map: + fields: + - name: ingress + type: + list: + elementType: + namedType: io.k8s.api.networking.v1.IngressLoadBalancerIngress + elementRelationship: atomic +- name: io.k8s.api.networking.v1.IngressPortStatus + map: + fields: + - name: error + type: + scalar: string + - name: port + type: + scalar: numeric + default: 0 + - name: protocol + type: + scalar: string + default: "" - name: io.k8s.api.networking.v1.IngressRule map: fields: @@ -9352,7 +10974,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: loadBalancer type: - namedType: io.k8s.api.core.v1.LoadBalancerStatus + namedType: io.k8s.api.networking.v1.IngressLoadBalancerStatus default: {} - name: io.k8s.api.networking.v1.IngressTLS map: @@ -9471,6 +11093,86 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric + elementRelationship: atomic +- name: io.k8s.api.networking.v1alpha1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: 
io.k8s.api.networking.v1alpha1.ParentReference + map: + fields: + - name: group + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string +- name: io.k8s.api.networking.v1alpha1.ServiceCIDR + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec + default: {} + - name: status + type: + namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus + default: {} +- name: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec + map: + fields: + - name: cidrs + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ -9493,6 +11195,29 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.networking.v1beta1.HTTPIngressPath elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1beta1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1beta1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1beta1.ParentReference - name: io.k8s.api.networking.v1beta1.Ingress map: fields: @@ -9526,7 +11251,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString - default: {} - name: io.k8s.api.networking.v1beta1.IngressClass map: fields: @@ -9573,6 +11297,44 @@ var schemaYAML = typed.YAMLObject(`types: - name: parameters type: namedType: io.k8s.api.networking.v1beta1.IngressClassParametersReference +- name: io.k8s.api.networking.v1beta1.IngressLoadBalancerIngress + map: + fields: + - name: hostname + type: + scalar: string + - name: ip + type: + scalar: string + - name: ports + type: + list: + elementType: + namedType: io.k8s.api.networking.v1beta1.IngressPortStatus + elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IngressLoadBalancerStatus + map: + fields: + - name: ingress + type: + list: + elementType: + namedType: io.k8s.api.networking.v1beta1.IngressLoadBalancerIngress + elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IngressPortStatus + map: + fields: + - name: error + type: + scalar: string + - name: port + type: + scalar: numeric + default: 0 + - name: protocol + type: + scalar: string + default: "" - name: io.k8s.api.networking.v1beta1.IngressRule map: fields: @@ -9608,7 +11370,7 @@ var schemaYAML = typed.YAMLObject(`types: fields: - name: loadBalancer type: - namedType: io.k8s.api.core.v1.LoadBalancerStatus + namedType: io.k8s.api.networking.v1beta1.IngressLoadBalancerStatus default: {} - name: io.k8s.api.networking.v1beta1.IngressTLS map: @@ -9622,6 +11384,62 @@ var schemaYAML = typed.YAMLObject(`types: - name: secretName type: scalar: string +- name: io.k8s.api.networking.v1beta1.ParentReference + 
map: + fields: + - name: group + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string +- name: io.k8s.api.networking.v1beta1.ServiceCIDR + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRSpec + default: {} + - name: status + type: + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRStatus + default: {} +- name: io.k8s.api.networking.v1beta1.ServiceCIDRSpec + map: + fields: + - name: cidrs + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.ServiceCIDRStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type - name: io.k8s.api.node.v1.Overhead map: fields: @@ -9816,6 +11634,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: unhealthyPodEvictionPolicy + type: + scalar: string - name: io.k8s.api.policy.v1.PodDisruptionBudgetStatus map: fields: @@ -9851,29 +11672,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.policy.v1beta1.AllowedCSIDriver - map: - fields: - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.AllowedFlexVolume - map: - fields: - - name: driver - type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.AllowedHostPath - map: - fields: - - name: pathPrefix - type: - scalar: string - - name: readOnly - type: - scalar: boolean - name: io.k8s.api.policy.v1beta1.Eviction map: fields: @@ -9890,40 +11688,6 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} -- name: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions - map: - fields: - - name: ranges - type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule - type: - scalar: string -- name: io.k8s.api.policy.v1beta1.HostPortRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 -- name: io.k8s.api.policy.v1beta1.IDRange - map: - fields: - - name: max - type: - scalar: numeric - default: 0 - - name: min - type: - scalar: numeric - default: 0 - name: io.k8s.api.policy.v1beta1.PodDisruptionBudget map: fields: @@ -9957,6 +11721,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: selector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: unhealthyPodEvictionPolicy + type: + scalar: string - name: io.k8s.api.policy.v1beta1.PodDisruptionBudgetStatus map: fields: @@ -9992,9 +11759,21 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric -- name: io.k8s.api.policy.v1beta1.PodSecurityPolicy +- name: io.k8s.api.rbac.v1.AggregationRule + map: + fields: + - name: clusterRoleSelectors + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + elementRelationship: atomic +- name: io.k8s.api.rbac.v1.ClusterRole map: fields: + - name: aggregationRule + type: + namedType: io.k8s.api.rbac.v1.AggregationRule - name: 
apiVersion type: scalar: string @@ -10005,183 +11784,306 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: spec + - name: rules type: - namedType: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec - default: {} -- name: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec + list: + elementType: + namedType: io.k8s.api.rbac.v1.PolicyRule + elementRelationship: atomic +- name: io.k8s.api.rbac.v1.ClusterRoleBinding map: fields: - - name: allowPrivilegeEscalation + - name: apiVersion type: - scalar: boolean - - name: allowedCSIDrivers + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: roleRef + type: + namedType: io.k8s.api.rbac.v1.RoleRef + default: {} + - name: subjects type: list: elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedCSIDriver + namedType: io.k8s.api.rbac.v1.Subject elementRelationship: atomic - - name: allowedCapabilities +- name: io.k8s.api.rbac.v1.PolicyRule + map: + fields: + - name: apiGroups type: list: elementType: scalar: string elementRelationship: atomic - - name: allowedFlexVolumes + - name: nonResourceURLs type: list: elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedFlexVolume + scalar: string elementRelationship: atomic - - name: allowedHostPaths + - name: resourceNames type: list: elementType: - namedType: io.k8s.api.policy.v1beta1.AllowedHostPath + scalar: string elementRelationship: atomic - - name: allowedProcMountTypes + - name: resources type: list: elementType: scalar: string elementRelationship: atomic - - name: allowedUnsafeSysctls + - name: verbs type: list: elementType: scalar: string elementRelationship: atomic - - name: defaultAddCapabilities +- name: io.k8s.api.rbac.v1.Role + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: rules type: list: elementType: - scalar: string + namedType: io.k8s.api.rbac.v1.PolicyRule elementRelationship: atomic - - name: defaultAllowPrivilegeEscalation +- name: io.k8s.api.rbac.v1.RoleBinding + map: + fields: + - name: apiVersion type: - scalar: boolean - - name: forbiddenSysctls + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: roleRef + type: + namedType: io.k8s.api.rbac.v1.RoleRef + default: {} + - name: subjects type: list: elementType: - scalar: string + namedType: io.k8s.api.rbac.v1.Subject elementRelationship: atomic - - name: fsGroup +- name: io.k8s.api.rbac.v1.RoleRef + map: + fields: + - name: apiGroup type: - namedType: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions - default: {} - - name: hostIPC + scalar: string + default: "" + - name: kind type: - scalar: boolean - - name: hostNetwork + scalar: string + default: "" + - name: name type: - scalar: boolean - - name: hostPID + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.rbac.v1.Subject + map: + fields: + - name: apiGroup type: - scalar: boolean - - name: hostPorts + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.rbac.v1alpha1.AggregationRule + map: + fields: + - name: 
clusterRoleSelectors type: list: elementType: - namedType: io.k8s.api.policy.v1beta1.HostPortRange + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector elementRelationship: atomic - - name: privileged +- name: io.k8s.api.rbac.v1alpha1.ClusterRole + map: + fields: + - name: aggregationRule type: - scalar: boolean - - name: readOnlyRootFilesystem + namedType: io.k8s.api.rbac.v1alpha1.AggregationRule + - name: apiVersion type: - scalar: boolean - - name: requiredDropCapabilities + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: rules type: list: elementType: - scalar: string + namedType: io.k8s.api.rbac.v1alpha1.PolicyRule elementRelationship: atomic - - name: runAsGroup - type: - namedType: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions - - name: runAsUser +- name: io.k8s.api.rbac.v1alpha1.ClusterRoleBinding + map: + fields: + - name: apiVersion type: - namedType: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions - default: {} - - name: runtimeClass + scalar: string + - name: kind type: - namedType: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions - - name: seLinux + scalar: string + - name: metadata type: - namedType: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: supplementalGroups + - name: roleRef type: - namedType: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions + namedType: io.k8s.api.rbac.v1alpha1.RoleRef default: {} - - name: volumes + - name: subjects type: list: elementType: - scalar: string + namedType: io.k8s.api.rbac.v1alpha1.Subject elementRelationship: atomic -- name: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions +- name: io.k8s.api.rbac.v1alpha1.PolicyRule map: fields: - - name: ranges + - name: apiGroups type: list: elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange + scalar: string elementRelationship: atomic - - name: rule + - name: nonResourceURLs type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions - map: - fields: - - name: ranges + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourceNames type: list: elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange + scalar: string elementRelationship: atomic - - name: rule + - name: resources type: - scalar: string - default: "" -- name: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions + list: + elementType: + scalar: string + elementRelationship: atomic + - name: verbs + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.rbac.v1alpha1.Role map: fields: - - name: allowedRuntimeClassNames + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: rules type: list: elementType: - scalar: string + namedType: io.k8s.api.rbac.v1alpha1.PolicyRule elementRelationship: atomic - - name: defaultRuntimeClassName +- name: io.k8s.api.rbac.v1alpha1.RoleBinding + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind type: scalar: string -- name: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: roleRef + type: + namedType: io.k8s.api.rbac.v1alpha1.RoleRef + default: {} + - name: subjects + type: + 
list: + elementType: + namedType: io.k8s.api.rbac.v1alpha1.Subject + elementRelationship: atomic +- name: io.k8s.api.rbac.v1alpha1.RoleRef map: fields: - - name: rule + - name: apiGroup type: scalar: string default: "" - - name: seLinuxOptions + - name: kind type: - namedType: io.k8s.api.core.v1.SELinuxOptions -- name: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.rbac.v1alpha1.Subject map: fields: - - name: ranges + - name: apiVersion type: - list: - elementType: - namedType: io.k8s.api.policy.v1beta1.IDRange - elementRelationship: atomic - - name: rule + scalar: string + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: namespace type: scalar: string -- name: io.k8s.api.rbac.v1.AggregationRule +- name: io.k8s.api.rbac.v1beta1.AggregationRule map: fields: - name: clusterRoleSelectors @@ -10190,12 +12092,12 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector elementRelationship: atomic -- name: io.k8s.api.rbac.v1.ClusterRole +- name: io.k8s.api.rbac.v1beta1.ClusterRole map: fields: - name: aggregationRule type: - namedType: io.k8s.api.rbac.v1.AggregationRule + namedType: io.k8s.api.rbac.v1beta1.AggregationRule - name: apiVersion type: scalar: string @@ -10210,9 +12112,9 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.rbac.v1.PolicyRule + namedType: io.k8s.api.rbac.v1beta1.PolicyRule elementRelationship: atomic -- name: io.k8s.api.rbac.v1.ClusterRoleBinding +- name: io.k8s.api.rbac.v1beta1.ClusterRoleBinding map: fields: - name: apiVersion @@ -10227,15 +12129,15 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: roleRef type: - namedType: io.k8s.api.rbac.v1.RoleRef + namedType: io.k8s.api.rbac.v1beta1.RoleRef default: {} - name: subjects type: list: elementType: - namedType: io.k8s.api.rbac.v1.Subject + namedType: io.k8s.api.rbac.v1beta1.Subject elementRelationship: atomic -- name: io.k8s.api.rbac.v1.PolicyRule +- name: io.k8s.api.rbac.v1beta1.PolicyRule map: fields: - name: apiGroups @@ -10268,7 +12170,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.rbac.v1.Role +- name: io.k8s.api.rbac.v1beta1.Role map: fields: - name: apiVersion @@ -10285,9 +12187,9 @@ var schemaYAML = typed.YAMLObject(`types: type: list: elementType: - namedType: io.k8s.api.rbac.v1.PolicyRule + namedType: io.k8s.api.rbac.v1beta1.PolicyRule elementRelationship: atomic -- name: io.k8s.api.rbac.v1.RoleBinding +- name: io.k8s.api.rbac.v1beta1.RoleBinding map: fields: - name: apiVersion @@ -10302,15 +12204,15 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: roleRef type: - namedType: io.k8s.api.rbac.v1.RoleRef + namedType: io.k8s.api.rbac.v1beta1.RoleRef default: {} - name: subjects type: list: elementType: - namedType: io.k8s.api.rbac.v1.Subject + namedType: io.k8s.api.rbac.v1beta1.Subject elementRelationship: atomic -- name: io.k8s.api.rbac.v1.RoleRef +- name: io.k8s.api.rbac.v1beta1.RoleRef map: fields: - name: apiGroup @@ -10325,8 +12227,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - elementRelationship: atomic -- name: io.k8s.api.rbac.v1.Subject +- name: io.k8s.api.rbac.v1beta1.Subject map: fields: - name: apiGroup @@ -10343,114 +12244,129 @@ var schemaYAML = typed.YAMLObject(`types: - 
name: namespace type: scalar: string - elementRelationship: atomic -- name: io.k8s.api.rbac.v1alpha1.AggregationRule - map: - fields: - - name: clusterRoleSelectors - type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - elementRelationship: atomic -- name: io.k8s.api.rbac.v1alpha1.ClusterRole +- name: io.k8s.api.resource.v1alpha3.AllocationResult map: fields: - - name: aggregationRule - type: - namedType: io.k8s.api.rbac.v1alpha1.AggregationRule - - name: apiVersion - type: - scalar: string - - name: kind + - name: controller type: scalar: string - - name: metadata + - name: devices type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult default: {} - - name: rules + - name: nodeSelector type: - list: + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.resource.v1alpha3.BasicDevice + map: + fields: + - name: attributes + type: + map: elementType: - namedType: io.k8s.api.rbac.v1alpha1.PolicyRule - elementRelationship: atomic -- name: io.k8s.api.rbac.v1alpha1.ClusterRoleBinding + namedType: io.k8s.api.resource.v1alpha3.DeviceAttribute + - name: capacity + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.resource.v1alpha3.CELDeviceSelector map: fields: - - name: apiVersion + - name: expression type: scalar: string - - name: kind + default: "" +- name: io.k8s.api.resource.v1alpha3.Device + map: + fields: + - name: basic type: - scalar: string - - name: metadata + namedType: io.k8s.api.resource.v1alpha3.BasicDevice + - name: name type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: roleRef + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration + map: + fields: + - name: opaque type: - namedType: io.k8s.api.rbac.v1alpha1.RoleRef - default: {} - - name: subjects + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + - name: requests type: list: elementType: - namedType: io.k8s.api.rbac.v1alpha1.Subject + scalar: string elementRelationship: atomic -- name: io.k8s.api.rbac.v1alpha1.PolicyRule + - name: source + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationResult map: fields: - - name: apiGroups + - name: config type: list: elementType: - scalar: string + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration elementRelationship: atomic - - name: nonResourceURLs + - name: results type: list: elementType: - scalar: string + namedType: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult elementRelationship: atomic - - name: resourceNames +- name: io.k8s.api.resource.v1alpha3.DeviceAttribute + map: + fields: + - name: bool + type: + scalar: boolean + - name: int + type: + scalar: numeric + - name: string + type: + scalar: string + - name: version + type: + scalar: string +- name: io.k8s.api.resource.v1alpha3.DeviceClaim + map: + fields: + - name: config type: list: elementType: - scalar: string + namedType: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration elementRelationship: atomic - - name: resources + - name: constraints type: list: elementType: - scalar: string + namedType: io.k8s.api.resource.v1alpha3.DeviceConstraint elementRelationship: atomic - - name: verbs + - name: requests type: list: elementType: - scalar: string + namedType: io.k8s.api.resource.v1alpha3.DeviceRequest elementRelationship: atomic -- name: io.k8s.api.rbac.v1alpha1.Role +- 
name: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration map: fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata + - name: opaque type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: rules + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + - name: requests type: list: elementType: - namedType: io.k8s.api.rbac.v1alpha1.PolicyRule + scalar: string elementRelationship: atomic -- name: io.k8s.api.rbac.v1alpha1.RoleBinding +- name: io.k8s.api.resource.v1alpha3.DeviceClass map: fields: - name: apiVersion @@ -10463,24 +12379,60 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: roleRef + - name: spec type: - namedType: io.k8s.api.rbac.v1alpha1.RoleRef + namedType: io.k8s.api.resource.v1alpha3.DeviceClassSpec default: {} - - name: subjects +- name: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration +- name: io.k8s.api.resource.v1alpha3.DeviceClassSpec + map: + fields: + - name: config type: list: elementType: - namedType: io.k8s.api.rbac.v1alpha1.Subject + namedType: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration elementRelationship: atomic -- name: io.k8s.api.rbac.v1alpha1.RoleRef + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector + elementRelationship: atomic + - name: suitableNodes + type: + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.resource.v1alpha3.DeviceConstraint map: fields: - - name: apiGroup + - name: matchAttribute type: scalar: string - default: "" - - name: kind + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceRequest + map: + fields: + - name: adminAccess + type: + scalar: boolean + default: false + - name: allocationMode + type: + scalar: string + - name: count + type: + scalar: numeric + - name: deviceClassName type: scalar: string default: "" @@ -10488,38 +12440,50 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.rbac.v1alpha1.Subject + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult map: fields: - - name: apiVersion + - name: device type: scalar: string - - name: kind + default: "" + - name: driver type: scalar: string default: "" - - name: name + - name: pool type: scalar: string default: "" - - name: namespace + - name: request type: scalar: string -- name: io.k8s.api.rbac.v1beta1.AggregationRule + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceSelector map: fields: - - name: clusterRoleSelectors + - name: cel type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - elementRelationship: atomic -- name: io.k8s.api.rbac.v1beta1.ClusterRole + namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector +- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration map: fields: - - name: aggregationRule + - name: driver type: - namedType: io.k8s.api.rbac.v1beta1.AggregationRule + scalar: string + default: "" + - name: parameters + type: + namedType: __untyped_atomic_ +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContext + map: + fields: - name: 
apiVersion type: scalar: string @@ -10530,13 +12494,38 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: rules + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec + default: {} + - name: status + type: + namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus + default: {} +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec + map: + fields: + - name: potentialNodes type: list: elementType: - namedType: io.k8s.api.rbac.v1beta1.PolicyRule + scalar: string elementRelationship: atomic -- name: io.k8s.api.rbac.v1beta1.ClusterRoleBinding + - name: selectedNode + type: + scalar: string +- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus + map: + fields: + - name: resourceClaims + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus + elementRelationship: associative + keys: + - name +- name: io.k8s.api.resource.v1alpha3.ResourceClaim map: fields: - name: apiVersion @@ -10545,54 +12534,77 @@ var schemaYAML = typed.YAMLObject(`types: - name: kind type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: roleRef - type: - namedType: io.k8s.api.rbac.v1beta1.RoleRef - default: {} - - name: subjects + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec + default: {} + - name: status + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimStatus + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference + map: + fields: + - name: apiGroup + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: resource + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: unsuitableNodes type: list: elementType: - namedType: io.k8s.api.rbac.v1beta1.Subject + scalar: string elementRelationship: atomic -- name: io.k8s.api.rbac.v1beta1.PolicyRule +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec map: fields: - - name: apiGroups + - name: controller type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: nonResourceURLs + scalar: string + - name: devices type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: resourceNames + namedType: io.k8s.api.resource.v1alpha3.DeviceClaim + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimStatus + map: + fields: + - name: allocation type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: resources + namedType: io.k8s.api.resource.v1alpha3.AllocationResult + - name: deallocationRequested type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: verbs + scalar: boolean + - name: reservedFor type: list: elementType: - scalar: string - elementRelationship: atomic -- name: io.k8s.api.rbac.v1beta1.Role + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference + elementRelationship: associative + keys: + - uid +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplate map: fields: - name: apiVersion @@ -10605,67 +12617,79 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: rules + - name: spec type: - list: - elementType: - namedType: io.k8s.api.rbac.v1beta1.PolicyRule - elementRelationship: atomic -- name: io.k8s.api.rbac.v1beta1.RoleBinding + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec map: fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - name: metadata type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: roleRef + - name: spec type: - namedType: io.k8s.api.rbac.v1beta1.RoleRef + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec default: {} - - name: subjects - type: - list: - elementType: - namedType: io.k8s.api.rbac.v1beta1.Subject - elementRelationship: atomic -- name: io.k8s.api.rbac.v1beta1.RoleRef +- name: io.k8s.api.resource.v1alpha3.ResourcePool map: fields: - - name: apiGroup - type: - scalar: string - default: "" - - name: kind + - name: generation type: - scalar: string - default: "" + scalar: numeric + default: 0 - name: name type: scalar: string default: "" -- name: io.k8s.api.rbac.v1beta1.Subject + - name: resourceSliceCount + type: + scalar: numeric + default: 0 +- name: io.k8s.api.resource.v1alpha3.ResourceSlice map: fields: - - name: apiGroup + - name: apiVersion type: scalar: string - name: kind type: scalar: string - default: "" - - name: name + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceSliceSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceSliceSpec + map: + fields: + - name: allNodes + type: + scalar: boolean + - name: devices + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.Device + elementRelationship: atomic + - name: driver type: scalar: string default: "" - - name: namespace + - name: nodeName type: scalar: string + - name: nodeSelector + type: + namedType: io.k8s.api.core.v1.NodeSelector + - name: pool + type: + namedType: io.k8s.api.resource.v1alpha3.ResourcePool + default: {} - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: @@ -10776,6 +12800,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: requiresRepublish type: scalar: boolean + - name: seLinuxMount + type: + scalar: boolean - name: storageCapacity type: scalar: boolean @@ -10839,6 +12866,32 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - name +- name: io.k8s.api.storage.v1.CSIStorageCapacity + map: + fields: + - name: apiVersion + type: + scalar: string + - name: capacity + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: kind + type: + scalar: string + - name: maximumVolumeSize + type: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: nodeTopology + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector + - name: storageClassName + type: + scalar: string + default: "" - name: io.k8s.api.storage.v1.StorageClass map: fields: @@ -10964,7 +13017,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1.VolumeNodeResources map: fields: @@ -11060,6 +13112,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: 
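// Illustrative sketch (editorial, not part of the generated schema above): the
// io.k8s.api.resource.v1alpha3.* entries registered here describe the new
// dynamic-resource-allocation (DRA) API group that client-go v0.31 vendors in.
// kube-mgmt does not create these objects; the snippet below only shows the
// object shape the schema implies, and the device class name is a placeholder.
package main

import (
	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newGPUClaim builds a ResourceClaim with a single device request, mirroring
// the DeviceClaim/DeviceRequest fields listed in the schema above.
func newGPUClaim() *resourcev1alpha3.ResourceClaim {
	return &resourcev1alpha3.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu-claim", Namespace: "default"},
		Spec: resourcev1alpha3.ResourceClaimSpec{
			Devices: resourcev1alpha3.DeviceClaim{
				Requests: []resourcev1alpha3.DeviceRequest{{
					Name:            "gpu",
					DeviceClassName: "example.com-gpu", // hypothetical DeviceClass
				}},
			},
		},
	}
}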
io.k8s.api.storage.v1alpha1.VolumeError +- name: io.k8s.api.storage.v1alpha1.VolumeAttributesClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parameters + type: + map: + elementType: + scalar: string - name: io.k8s.api.storage.v1alpha1.VolumeError map: fields: @@ -11069,7 +13143,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.CSIDriver map: fields: @@ -11102,6 +13175,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: requiresRepublish type: scalar: boolean + - name: seLinuxMount + type: + scalar: boolean - name: storageCapacity type: scalar: boolean @@ -11307,6 +13383,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: io.k8s.api.storage.v1beta1.VolumeError +- name: io.k8s.api.storage.v1beta1.VolumeAttributesClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parameters + type: + map: + elementType: + scalar: string - name: io.k8s.api.storage.v1beta1.VolumeError map: fields: @@ -11316,13 +13414,89 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: io.k8s.api.storage.v1beta1.VolumeNodeResources map: fields: - name: count type: scalar: numeric +- name: io.k8s.api.storagemigration.v1alpha1.GroupVersionResource + map: + fields: + - name: group + type: + scalar: string + - name: resource + type: + scalar: string + - name: version + type: + scalar: string +- name: io.k8s.api.storagemigration.v1alpha1.MigrationCondition + map: + fields: + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec + default: {} + - name: status + type: + namedType: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus + default: {} +- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec + map: + fields: + - name: continueToken + type: + scalar: string + - name: resource + type: + namedType: io.k8s.api.storagemigration.v1alpha1.GroupVersionResource + default: {} +- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.api.storagemigration.v1alpha1.MigrationCondition + elementRelationship: associative + keys: + - type + - name: resourceVersion + type: + scalar: string - name: io.k8s.apimachinery.pkg.api.resource.Quantity scalar: untyped - name: io.k8s.apimachinery.pkg.apis.meta.v1.Condition @@ 
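// Illustrative sketch (editorial, not part of the generated schema above): the
// storage additions registered here (seLinuxMount, CSIStorageCapacity,
// VolumeAttributesClass, storagemigration.k8s.io) arrive with the k8s.io
// dependency bump. Below is a minimal VolumeAttributesClass matching the
// v1alpha1 schema entry; the driver name and parameters are placeholders.
package main

import (
	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var exampleVolumeAttributesClass = &storagev1alpha1.VolumeAttributesClass{
	ObjectMeta: metav1.ObjectMeta{Name: "fast"},
	DriverName: "csi.example.com",                  // hypothetical CSI driver
	Parameters: map[string]string{"iops": "16000"}, // opaque, driver-defined
}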
-11331,7 +13505,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: message type: scalar: string @@ -11456,13 +13629,9 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: scalar: string - - name: clusterName - type: - scalar: string - name: creationTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time - default: {} - name: deletionGracePeriodSeconds type: scalar: numeric diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go index c84102cdd..466aaebb6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ConditionApplyConfiguration represents an declarative configuration of the Condition type for use +// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use // with apply. type ConditionApplyConfiguration struct { Type *string `json:"type,omitempty"` @@ -33,7 +33,7 @@ type ConditionApplyConfiguration struct { Message *string `json:"message,omitempty"` } -// ConditionApplyConfiguration constructs an declarative configuration of the Condition type for use with +// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with // apply. func Condition() *ConditionApplyConfiguration { return &ConditionApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go index 289bef43d..313bb9784 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeleteOptionsApplyConfiguration represents an declarative configuration of the DeleteOptions type for use +// DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use // with apply. type DeleteOptionsApplyConfiguration struct { TypeMetaApplyConfiguration `json:",inline"` @@ -33,10 +33,13 @@ type DeleteOptionsApplyConfiguration struct { DryRun []string `json:"dryRun,omitempty"` } -// DeleteOptionsApplyConfiguration constructs an declarative configuration of the DeleteOptions type for use with +// DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with // apply. func DeleteOptions() *DeleteOptionsApplyConfiguration { - return &DeleteOptionsApplyConfiguration{} + b := &DeleteOptionsApplyConfiguration{} + b.WithKind("DeleteOptions") + b.WithAPIVersion("meta.k8s.io/v1") + return b } // WithKind sets the Kind field in the declarative configuration to the given value diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go index 6d24bc363..1f33c94e0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go @@ -18,14 +18,14 @@ limitations under the License. 
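// Illustrative sketch (editorial, not part of the vendored patch above): with
// this client-go version, DeleteOptions() pre-populates Kind and APIVersion,
// so callers no longer need to chain WithKind/WithAPIVersion themselves.
package main

import (
	"fmt"

	metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	do := metav1apply.DeleteOptions().WithGracePeriodSeconds(30)
	fmt.Println(*do.Kind, *do.APIVersion) // prints: DeleteOptions meta.k8s.io/v1
}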
package v1 -// LabelSelectorApplyConfiguration represents an declarative configuration of the LabelSelector type for use +// LabelSelectorApplyConfiguration represents a declarative configuration of the LabelSelector type for use // with apply. type LabelSelectorApplyConfiguration struct { MatchLabels map[string]string `json:"matchLabels,omitempty"` MatchExpressions []LabelSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// LabelSelectorApplyConfiguration constructs an declarative configuration of the LabelSelector type for use with +// LabelSelectorApplyConfiguration constructs a declarative configuration of the LabelSelector type for use with // apply. func LabelSelector() *LabelSelectorApplyConfiguration { return &LabelSelectorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go index ff70f365e..bd9db9659 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LabelSelectorRequirementApplyConfiguration represents an declarative configuration of the LabelSelectorRequirement type for use +// LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use // with apply. type LabelSelectorRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -30,7 +30,7 @@ type LabelSelectorRequirementApplyConfiguration struct { Values []string `json:"values,omitempty"` } -// LabelSelectorRequirementApplyConfiguration constructs an declarative configuration of the LabelSelectorRequirement type for use with +// LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with // apply. func LabelSelectorRequirement() *LabelSelectorRequirementApplyConfiguration { return &LabelSelectorRequirementApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go index f4d7e2681..6913df822 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ManagedFieldsEntryApplyConfiguration represents an declarative configuration of the ManagedFieldsEntry type for use +// ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use // with apply. type ManagedFieldsEntryApplyConfiguration struct { Manager *string `json:"manager,omitempty"` @@ -34,7 +34,7 @@ type ManagedFieldsEntryApplyConfiguration struct { Subresource *string `json:"subresource,omitempty"` } -// ManagedFieldsEntryApplyConfiguration constructs an declarative configuration of the ManagedFieldsEntry type for use with +// ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with // apply. 
func ManagedFieldsEntry() *ManagedFieldsEntryApplyConfiguration { return &ManagedFieldsEntryApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go index 0aeaeba27..a9419975e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go @@ -23,13 +23,12 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ObjectMetaApplyConfiguration represents an declarative configuration of the ObjectMeta type for use +// ObjectMetaApplyConfiguration represents a declarative configuration of the ObjectMeta type for use // with apply. type ObjectMetaApplyConfiguration struct { Name *string `json:"name,omitempty"` GenerateName *string `json:"generateName,omitempty"` Namespace *string `json:"namespace,omitempty"` - SelfLink *string `json:"selfLink,omitempty"` UID *types.UID `json:"uid,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` Generation *int64 `json:"generation,omitempty"` @@ -40,10 +39,9 @@ type ObjectMetaApplyConfiguration struct { Annotations map[string]string `json:"annotations,omitempty"` OwnerReferences []OwnerReferenceApplyConfiguration `json:"ownerReferences,omitempty"` Finalizers []string `json:"finalizers,omitempty"` - ClusterName *string `json:"clusterName,omitempty"` } -// ObjectMetaApplyConfiguration constructs an declarative configuration of the ObjectMeta type for use with +// ObjectMetaApplyConfiguration constructs a declarative configuration of the ObjectMeta type for use with // apply. func ObjectMeta() *ObjectMetaApplyConfiguration { return &ObjectMetaApplyConfiguration{} @@ -73,14 +71,6 @@ func (b *ObjectMetaApplyConfiguration) WithNamespace(value string) *ObjectMetaAp return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ObjectMetaApplyConfiguration) WithSelfLink(value string) *ObjectMetaApplyConfiguration { - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -180,10 +170,7 @@ func (b *ObjectMetaApplyConfiguration) WithFinalizers(values ...string) *ObjectM return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ObjectMetaApplyConfiguration) WithClusterName(value string) *ObjectMetaApplyConfiguration { - b.ClusterName = &value - return b +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ObjectMetaApplyConfiguration) GetName() *string { + return b.Name } diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go index b3117d6a4..277615232 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// OwnerReferenceApplyConfiguration represents an declarative configuration of the OwnerReference type for use +// OwnerReferenceApplyConfiguration represents a declarative configuration of the OwnerReference type for use // with apply. type OwnerReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` @@ -33,7 +33,7 @@ type OwnerReferenceApplyConfiguration struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty"` } -// OwnerReferenceApplyConfiguration constructs an declarative configuration of the OwnerReference type for use with +// OwnerReferenceApplyConfiguration constructs a declarative configuration of the OwnerReference type for use with // apply. func OwnerReference() *OwnerReferenceApplyConfiguration { return &OwnerReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go index f627733f1..8f8b6c6b3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// PreconditionsApplyConfiguration represents an declarative configuration of the Preconditions type for use +// PreconditionsApplyConfiguration represents a declarative configuration of the Preconditions type for use // with apply. type PreconditionsApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` } -// PreconditionsApplyConfiguration constructs an declarative configuration of the Preconditions type for use with +// PreconditionsApplyConfiguration constructs a declarative configuration of the Preconditions type for use with // apply. func Preconditions() *PreconditionsApplyConfiguration { return &PreconditionsApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go index 877b0890e..979044384 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TypeMetaApplyConfiguration represents an declarative configuration of the TypeMeta type for use +// TypeMetaApplyConfiguration represents a declarative configuration of the TypeMeta type for use // with apply. type TypeMetaApplyConfiguration struct { Kind *string `json:"kind,omitempty"` APIVersion *string `json:"apiVersion,omitempty"` } -// TypeMetaApplyConfiguration constructs an declarative configuration of the TypeMeta type for use with +// TypeMetaApplyConfiguration constructs a declarative configuration of the TypeMeta type for use with // apply. 
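// Illustrative sketch (editorial, not part of the vendored patch above): the
// ObjectMeta apply configuration loses the SelfLink and ClusterName fields
// (and their With* setters) and gains a GetName getter, so any caller still
// chaining WithSelfLink or WithClusterName must drop those calls when picking
// up this client-go version. Names and labels below are placeholders.
package main

import (
	"fmt"

	metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	meta := metav1apply.ObjectMeta().
		WithName("opa").
		WithNamespace("opa-system").
		WithLabels(map[string]string{"app": "opa"})

	fmt.Println(*meta.GetName()) // new getter; prints: opa
}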
func TypeMeta() *TypeMetaApplyConfiguration { return &TypeMetaApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go index 8a58d9e87..a206bd326 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go @@ -125,7 +125,7 @@ func (e *extractor) extractUnstructured(object *unstructured.Unstructured, field return nil, fmt.Errorf("failed to fetch the objectType: %v", err) } result := &unstructured.Unstructured{} - err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) + err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) //nolint:forbidigo if err != nil { return nil, fmt.Errorf("failed calling ExtractInto for unstructured: %v", err) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go index 07b6a67f6..e39670f29 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/networking/v1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go index fef529d69..ad9a7a677 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. 
func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go index 74c0bb273..607c26e94 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithSelfLink(value string) *IngressApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithClusterName(value string) *IngressApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go index 575713599..b014b7bee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { Service *IngressServiceBackendApplyConfiguration `json:"service,omitempty"` Resource *corev1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go index 5b36992e4..14acc7dbd 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. type IngressClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct { Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithSelfLink(value string) *IngressClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -241,15 +232,6 @@ func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *Ingre return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithClusterName(value string) *IngressClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *IngressClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -263,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go index a020d3a8d..0dba1ebc5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go index ec0423e70..23e848434 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. 
type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go new file mode 100644 index 000000000..d0feb44da --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use +// with apply. +type IngressLoadBalancerIngressApplyConfiguration struct { + IP *string `json:"ip,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` +} + +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with +// apply. +func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { + return &IngressLoadBalancerIngressApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithIP(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.IP = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithHostname(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.Hostname = &value + return b +} + +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. 
+func (b *IngressLoadBalancerIngressApplyConfiguration) WithPorts(values ...*IngressPortStatusApplyConfiguration) *IngressLoadBalancerIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go new file mode 100644 index 000000000..08c841f06 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use +// with apply. +type IngressLoadBalancerStatusApplyConfiguration struct { + Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with +// apply. +func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { + return &IngressLoadBalancerStatusApplyConfiguration{} +} + +// WithIngress adds the given value to the Ingress field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ingress field. +func (b *IngressLoadBalancerStatusApplyConfiguration) WithIngress(values ...*IngressLoadBalancerIngressApplyConfiguration) *IngressLoadBalancerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIngress") + } + b.Ingress = append(b.Ingress, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go new file mode 100644 index 000000000..b6411199f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use +// with apply. +type IngressPortStatusApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Protocol *v1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` +} + +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with +// apply. +func IngressPortStatus() *IngressPortStatusApplyConfiguration { + return &IngressPortStatusApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPortStatusApplyConfiguration { + b.Port = &value + return b +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { + b.Protocol = &value + return b +} + +// WithError sets the Error field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Error field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithError(value string) *IngressPortStatusApplyConfiguration { + b.Error = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go index 8153e88fe..4ef871f07 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go index d0e094387..1e13e378b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go index 399739631..07876afd1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressServiceBackendApplyConfiguration represents an declarative configuration of the IngressServiceBackend type for use +// IngressServiceBackendApplyConfiguration represents a declarative configuration of the IngressServiceBackend type for use // with apply. type IngressServiceBackendApplyConfiguration struct { Name *string `json:"name,omitempty"` Port *ServiceBackendPortApplyConfiguration `json:"port,omitempty"` } -// IngressServiceBackendApplyConfiguration constructs an declarative configuration of the IngressServiceBackend type for use with +// IngressServiceBackendApplyConfiguration constructs a declarative configuration of the IngressServiceBackend type for use with // apply. func IngressServiceBackend() *IngressServiceBackendApplyConfiguration { return &IngressServiceBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go index 635514ecf..0572153aa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. 
func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go index dd8b25d83..bd1327c93 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go @@ -18,17 +18,13 @@ limitations under the License. package v1 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { - LoadBalancer *v1.LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` + LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} @@ -37,7 +33,7 @@ func IngressStatus() *IngressStatusApplyConfiguration { // WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LoadBalancer field is set to the value of the last call. -func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *v1.LoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { +func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *IngressLoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { b.LoadBalancer = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go index 4d8d369f7..44092503f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go index 1efd6edfd..f3447a8f1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. 
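// Illustrative sketch (editorial, not part of the vendored patch above):
// IngressStatus.WithLoadBalancer now takes the networking/v1-specific
// IngressLoadBalancerStatus builder instead of the core/v1 LoadBalancerStatus
// one, assembled from the new IngressLoadBalancerIngress and IngressPortStatus
// types added earlier in this patch. Hostname and port values are placeholders.
package main

import (
	corev1 "k8s.io/api/core/v1"
	netv1apply "k8s.io/client-go/applyconfigurations/networking/v1"
)

func exampleIngressStatus() *netv1apply.IngressStatusApplyConfiguration {
	return netv1apply.IngressStatus().
		WithLoadBalancer(netv1apply.IngressLoadBalancerStatus().
			WithIngress(netv1apply.IngressLoadBalancerIngress().
				WithHostname("lb.example.com").
				WithPorts(netv1apply.IngressPortStatus().
					WithPort(443).
					WithProtocol(corev1.ProtocolTCP))))
}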
package v1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 7091d7cfd..3f8c8a535 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct { Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithSelfLink(value string) *NetworkPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *Netw return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *NetworkPolicyApplyConfiguration) WithClusterName(value string) *NetworkPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *NetworkPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go index e5751c441..46e2706ec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go index 630fe1fab..6e9875978 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. 
func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go index 909b651c0..046de3e23 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct { IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go index 73dbed1d8..581ef1c34 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { Protocol *v1.Protocol `json:"protocol,omitempty"` @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct { EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go index 882d8233a..da5ed5d35 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. 
type NetworkPolicySpecApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct { PolicyTypes []apinetworkingv1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go index ec278960c..517f97483 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ServiceBackendPortApplyConfiguration represents an declarative configuration of the ServiceBackendPort type for use +// ServiceBackendPortApplyConfiguration represents a declarative configuration of the ServiceBackendPort type for use // with apply. type ServiceBackendPortApplyConfiguration struct { Name *string `json:"name,omitempty"` Number *int32 `json:"number,omitempty"` } -// ServiceBackendPortApplyConfiguration constructs an declarative configuration of the ServiceBackendPort type for use with +// ServiceBackendPortApplyConfiguration constructs a declarative configuration of the ServiceBackendPort type for use with // apply. func ServiceBackendPort() *ServiceBackendPortApplyConfiguration { return &ServiceBackendPortApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go new file mode 100644 index 000000000..999c23fa1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use +// with apply. 
+type IPAddressApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` +} + +// IPAddress constructs a declarative configuration of the IPAddress type for use with +// apply. +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} + b.WithName(name) + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b +} + +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. +// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") +} + +// ExtractIPAddressStatus is the same as ExtractIPAddress except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractIPAddressStatus(iPAddress *networkingv1alpha1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") +} + +func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1alpha1.IPAddress"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(iPAddress.Name) + + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go new file mode 100644 index 000000000..bf025a8c1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use +// with apply. +type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. +func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go new file mode 100644 index 000000000..d5a52d503 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use +// with apply. +type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with +// apply. +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go new file mode 100644 index 000000000..984e049f2 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use +// with apply. +type ServiceCIDRApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with +// apply. +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { + b := &ServiceCIDRApplyConfiguration{} + b.WithName(name) + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b +} + +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. +// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "") +} + +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractServiceCIDRStatus(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "status") +} + +func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { + b := &ServiceCIDRApplyConfiguration{} + err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ServiceCIDR"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(serviceCIDR.Name) + + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go new file mode 100644 index 000000000..7875ff403 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use +// with apply. +type ServiceCIDRSpecApplyConfiguration struct { + CIDRs []string `json:"cidrs,omitempty"` +} + +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with +// apply. +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { + return &ServiceCIDRSpecApplyConfiguration{} +} + +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CIDRs field. +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { + for i := range values { + b.CIDRs = append(b.CIDRs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go new file mode 100644 index 000000000..34715e3a4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use +// with apply. 
+type ServiceCIDRStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with +// apply. +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { + return &ServiceCIDRStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go index b12907e81..61b458f7e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/networking/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -30,7 +30,7 @@ type HTTPIngressPathApplyConfiguration struct { Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go index 3137bc5eb..124545223 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. 
func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go index 6b87d1ff3..0df53ea65 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithSelfLink(value string) *IngressApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithClusterName(value string) *IngressApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go index f19c2f2ee..9d386f160 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go index 3a13cd083..b0e877b57 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. type IngressClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct { Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithSelfLink(value string) *IngressClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -241,15 +232,6 @@ func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *Ingre return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithClusterName(value string) *IngressClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *IngressClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -263,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go index e6ca805e4..2a307a676 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go index 51040462c..eefbf62b8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. 
type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go new file mode 100644 index 000000000..12dbc3596 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use +// with apply. +type IngressLoadBalancerIngressApplyConfiguration struct { + IP *string `json:"ip,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` +} + +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with +// apply. +func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { + return &IngressLoadBalancerIngressApplyConfiguration{} +} + +// WithIP sets the IP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IP field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithIP(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.IP = &value + return b +} + +// WithHostname sets the Hostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Hostname field is set to the value of the last call. +func (b *IngressLoadBalancerIngressApplyConfiguration) WithHostname(value string) *IngressLoadBalancerIngressApplyConfiguration { + b.Hostname = &value + return b +} + +// WithPorts adds the given value to the Ports field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ports field. 
+func (b *IngressLoadBalancerIngressApplyConfiguration) WithPorts(values ...*IngressPortStatusApplyConfiguration) *IngressLoadBalancerIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPorts") + } + b.Ports = append(b.Ports, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go new file mode 100644 index 000000000..e896ab341 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use +// with apply. +type IngressLoadBalancerStatusApplyConfiguration struct { + Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with +// apply. +func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { + return &IngressLoadBalancerStatusApplyConfiguration{} +} + +// WithIngress adds the given value to the Ingress field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ingress field. +func (b *IngressLoadBalancerStatusApplyConfiguration) WithIngress(values ...*IngressLoadBalancerIngressApplyConfiguration) *IngressLoadBalancerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIngress") + } + b.Ingress = append(b.Ingress, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go new file mode 100644 index 000000000..4ee3f0161 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use +// with apply. +type IngressPortStatusApplyConfiguration struct { + Port *int32 `json:"port,omitempty"` + Protocol *v1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` +} + +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with +// apply. +func IngressPortStatus() *IngressPortStatusApplyConfiguration { + return &IngressPortStatusApplyConfiguration{} +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPortStatusApplyConfiguration { + b.Port = &value + return b +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { + b.Protocol = &value + return b +} + +// WithError sets the Error field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Error field is set to the value of the last call. +func (b *IngressPortStatusApplyConfiguration) WithError(value string) *IngressPortStatusApplyConfiguration { + b.Error = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go index 015541eeb..dc676f7b6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go index 2d03c7b13..4a6412475 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go index 1ab4d8bb7..58fbde8b3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go index 941769594..3aed61688 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go @@ -18,17 +18,13 @@ limitations under the License. package v1beta1 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { - LoadBalancer *v1.LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` + LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} @@ -37,7 +33,7 @@ func IngressStatus() *IngressStatusApplyConfiguration { // WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the LoadBalancer field is set to the value of the last call. -func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *v1.LoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { +func (b *IngressStatusApplyConfiguration) WithLoadBalancer(value *IngressLoadBalancerStatusApplyConfiguration) *IngressStatusApplyConfiguration { b.LoadBalancer = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go index 8ca93a0bc..63648cd46 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go new file mode 100644 index 000000000..3047d79b9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use +// with apply. +type IPAddressApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` +} + +// IPAddress constructs a declarative configuration of the IPAddress type for use with +// apply. 
+func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} + b.WithName(name) + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b +} + +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. +// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") +} + +// ExtractIPAddressStatus is the same as ExtractIPAddress except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractIPAddressStatus(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") +} + +func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1beta1.IPAddress"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(iPAddress.Name) + + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go similarity index 50% rename from vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go index 30c3724cf..76b02137d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go @@ -18,22 +18,22 @@ limitations under the License. package v1beta1 -// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use // with apply. -type AllowedFlexVolumeApplyConfiguration struct { - Driver *string `json:"driver,omitempty"` +type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` } -// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with // apply. -func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { - return &AllowedFlexVolumeApplyConfiguration{} +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} } -// WithDriver sets the Driver field in the declarative configuration to the given value +// WithParentRef sets the ParentRef field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Driver field is set to the value of the last call. -func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { - b.Driver = &value +// If called multiple times, the ParentRef field is set to the value of the last call. 
+func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go new file mode 100644 index 000000000..1863938f1 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use +// with apply. +type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with +// apply. +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go similarity index 63% rename from vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go index 0500824c9..4ef8e9eca 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - policyv1beta1 "k8s.io/api/policy/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,63 +27,64 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use // with apply. -type PodSecurityPolicyApplyConfiguration struct { +type ServiceCIDRApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` + Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` } -// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with // apply. -func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { - b := &PodSecurityPolicyApplyConfiguration{} +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { + b := &ServiceCIDRApplyConfiguration{} b.WithName(name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("policy/v1beta1") + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") return b } -// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from -// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a -// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. -// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. 
+// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") +func ExtractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "") } -// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodSecurityPolicyStatus(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { - return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") +func ExtractServiceCIDRStatus(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "status") } -func extractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { - b := &PodSecurityPolicyApplyConfiguration{} - err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) +func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { + b := &ServiceCIDRApplyConfiguration{} + err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1beta1.ServiceCIDR"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(podSecurityPolicy.Name) + b.WithName(serviceCIDR.Name) - b.WithKind("PodSecurityPolicy") - b.WithAPIVersion("policy/v1beta1") + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { b.Kind = &value return b } @@ -91,7 +92,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurit // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { b.APIVersion = &value return b } @@ -99,7 +100,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodS // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -108,7 +109,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurit // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -117,25 +118,16 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *Po // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSelfLink(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -144,7 +136,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecur // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -153,7 +145,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -162,7 +154,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSe // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -171,7 +163,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1 // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -180,7 +172,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1 // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -190,7 +182,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -205,7 +197,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]stri // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -219,7 +211,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -233,7 +225,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1. // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -241,16 +233,7 @@ func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) * return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithClusterName(value string) *PodSecurityPolicyApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - -func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -259,7 +242,21 @@ func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfiguration // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { b.Spec = value return b } + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go new file mode 100644 index 000000000..1f283532d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use +// with apply. +type ServiceCIDRSpecApplyConfiguration struct { + CIDRs []string `json:"cidrs,omitempty"` +} + +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with +// apply. +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { + return &ServiceCIDRSpecApplyConfiguration{} +} + +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CIDRs field. +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { + for i := range values { + b.CIDRs = append(b.CIDRs, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go new file mode 100644 index 000000000..f2dd92404 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use +// with apply. +type ServiceCIDRStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with +// apply. +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { + return &ServiceCIDRStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go index 9eec00267..6694538fc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go index 1521f2cef..6ce01a319 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithSelfLink(value string) *RuntimeClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -243,15 +234,6 @@ func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *Runti return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithClusterName(value string) *RuntimeClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RuntimeClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -281,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go index e01db85d7..2d084e0f5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go index 1ddaa64ac..84770a092 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. 
func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go index 295e763d7..9f139ee1b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RuntimeClassApplyConfiguration struct { Spec *RuntimeClassSpecApplyConfiguration `json:"spec,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithSelfLink(value string) *RuntimeClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *Runti return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithClusterName(value string) *RuntimeClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RuntimeClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -263,3 +245,9 @@ func (b *RuntimeClassApplyConfiguration) WithSpec(value *RuntimeClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go index 86e8585ad..1aa43eb13 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// RuntimeClassSpecApplyConfiguration represents an declarative configuration of the RuntimeClassSpec type for use +// RuntimeClassSpecApplyConfiguration represents a declarative configuration of the RuntimeClassSpec type for use // with apply. type RuntimeClassSpecApplyConfiguration struct { RuntimeHandler *string `json:"runtimeHandler,omitempty"` @@ -26,7 +26,7 @@ type RuntimeClassSpecApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClassSpecApplyConfiguration constructs an declarative configuration of the RuntimeClassSpec type for use with +// RuntimeClassSpecApplyConfiguration constructs a declarative configuration of the RuntimeClassSpec type for use with // apply. func RuntimeClassSpec() *RuntimeClassSpecApplyConfiguration { return &RuntimeClassSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go index d4117d6bc..6ce49ad86 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go index e8c489550..cf767e702 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. 
func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go index 2424e205e..fa6c9f45b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithSelfLink(value string) *RuntimeClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *Runti return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithClusterName(value string) *RuntimeClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RuntimeClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -281,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go index 10831d0ff..23d0b9752 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go +++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go index 07bda7467..3a051619f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. type EvictionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct { DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithSelfLink(value string) *EvictionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -243,15 +234,6 @@ func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionA return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithClusterName(value string) *EvictionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EvictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go index 888c20f60..a765a7b62 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. type PodDisruptionBudgetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct { Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PodDisruptionBudgetApplyConfiguration) WithSelfLink(value string) *PodDisruptionBudgetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
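
The spec builder for this type gains a new setter a little further down in this update (WithUnhealthyPodEvictionPolicy on PodDisruptionBudgetSpecApplyConfiguration). A hedged sketch of how a consumer would drive these generated policy/v1 builders through server-side apply — the clientset wiring, object name, namespace, labels and field-manager string are all invented for illustration and are not part of this diff:

package example

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
	applypolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1"
	"k8s.io/client-go/kubernetes"
)

func applyPDB(ctx context.Context, cs kubernetes.Interface) error {
	// Desired state only: fields left unset stay out of the server-side apply patch.
	pdb := applypolicyv1.PodDisruptionBudget("demo-pdb", "default").
		WithSpec(applypolicyv1.PodDisruptionBudgetSpec().
			WithMinAvailable(intstr.FromInt32(1)).
			WithSelector(applymetav1.LabelSelector().
				WithMatchLabels(map[string]string{"app": "demo"})).
			// New with this client-go version: eviction behaviour for running-but-not-ready pods.
			WithUnhealthyPodEvictionPolicy(policyv1.AlwaysAllow))

	// Apply under a field manager; Force resolves field conflicts in favour of this manager.
	_, err := cs.PolicyV1().PodDisruptionBudgets("default").
		Apply(ctx, pdb, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
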
@@ -244,15 +235,6 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PodDisruptionBudgetApplyConfiguration) WithClusterName(value string) *PodDisruptionBudgetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PodDisruptionBudgetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go index e2f49f528..291714545 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go @@ -19,19 +19,21 @@ limitations under the License. package v1 import ( + policyv1 "k8s.io/api/policy/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. type PodDisruptionBudgetSpecApplyConfiguration struct { - MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} @@ -60,3 +62,11 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMaxUnavailable(value int b.MaxUnavailable = &value return b } + +// WithUnhealthyPodEvictionPolicy sets the UnhealthyPodEvictionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnhealthyPodEvictionPolicy field is set to the value of the last call. 
+func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value policyv1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { + b.UnhealthyPodEvictionPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go index 2dd427b9e..d0f9baf41 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go deleted file mode 100644 index 493815d8d..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use -// with apply. -type AllowedHostPathApplyConfiguration struct { - PathPrefix *string `json:"pathPrefix,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` -} - -// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with -// apply. -func AllowedHostPath() *AllowedHostPathApplyConfiguration { - return &AllowedHostPathApplyConfiguration{} -} - -// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PathPrefix field is set to the value of the last call. 
-func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { - b.PathPrefix = &value - return b -} - -// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnly field is set to the value of the last call. -func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { - b.ReadOnly = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go index e1f0f137e..d4121af20 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. type EvictionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct { DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithSelfLink(value string) *EvictionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionA return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *EvictionApplyConfiguration) WithClusterName(value string) *EvictionApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *EvictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -265,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go deleted file mode 100644 index 06803b439..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use -// with apply. -type FSGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with -// apply. -func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { - return &FSGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. 
-func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go deleted file mode 100644 index 7c7968813..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use -// with apply. -type HostPortRangeApplyConfiguration struct { - Min *int32 `json:"min,omitempty"` - Max *int32 `json:"max,omitempty"` -} - -// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with -// apply. -func HostPortRange() *HostPortRangeApplyConfiguration { - return &HostPortRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go deleted file mode 100644 index af46f7658..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1beta1 - -// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use -// with apply. -type IDRangeApplyConfiguration struct { - Min *int64 `json:"min,omitempty"` - Max *int64 `json:"max,omitempty"` -} - -// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with -// apply. -func IDRange() *IDRangeApplyConfiguration { - return &IDRangeApplyConfiguration{} -} - -// WithMin sets the Min field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Min field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { - b.Min = &value - return b -} - -// WithMax sets the Max field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Max field is set to the value of the last call. -func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { - b.Max = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go index fc28026f5..813b57bae 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. type PodDisruptionBudgetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct { Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PodDisruptionBudgetApplyConfiguration) WithSelfLink(value string) *PodDisruptionBudgetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
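
WithSelfLink and WithClusterName are deleted from every builder touched by this update (here and in the files above), so downstream code that chained them against the old vendored client no longer compiles. A hedged sketch of the kind of call site affected; the object name and labels are invented and nothing in this snippet is taken from this repository:

package example

import (
	applypolicyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1"
)

func buildPDB() {
	// Previously (client-go v0.23.x) this builder also offered WithClusterName and WithSelfLink;
	// both are removed in this update, so any chained call to them has to be dropped.
	// The remaining ObjectMeta setters, such as WithLabels, keep the same chaining shape.
	pdb := applypolicyv1beta1.PodDisruptionBudget("demo-pdb", "default").
		WithLabels(map[string]string{"app": "demo"})
	_ = pdb
}
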
@@ -244,15 +235,6 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *PodDisruptionBudgetApplyConfiguration) WithClusterName(value string) *PodDisruptionBudgetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PodDisruptionBudgetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -274,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go index b5d17d3fe..405f1148b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go @@ -19,19 +19,21 @@ limitations under the License. package v1beta1 import ( + v1beta1 "k8s.io/api/policy/v1beta1" intstr "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. type PodDisruptionBudgetSpecApplyConfiguration struct { - MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} @@ -60,3 +62,11 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMaxUnavailable(value int b.MaxUnavailable = &value return b } + +// WithUnhealthyPodEvictionPolicy sets the UnhealthyPodEvictionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnhealthyPodEvictionPolicy field is set to the value of the last call. 
+func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value v1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { + b.UnhealthyPodEvictionPolicy = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go index d0813590e..e66a7fb38 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go deleted file mode 100644 index bf951cf56..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go +++ /dev/null @@ -1,285 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use -// with apply. 
-type PodSecurityPolicySpecApplyConfiguration struct { - Privileged *bool `json:"privileged,omitempty"` - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` - Volumes []v1beta1.FSType `json:"volumes,omitempty"` - HostNetwork *bool `json:"hostNetwork,omitempty"` - HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` - HostPID *bool `json:"hostPID,omitempty"` - HostIPC *bool `json:"hostIPC,omitempty"` - SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` - RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` - RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` - SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` - FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` - AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` - AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` - AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty"` - RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` -} - -// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with -// apply. -func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { - return &PodSecurityPolicySpecApplyConfiguration{} -} - -// WithPrivileged sets the Privileged field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Privileged field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.Privileged = &value - return b -} - -// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) - } - return b -} - -// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) - } - return b -} - -// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) - } - return b -} - -// WithVolumes adds the given value to the Volumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Volumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.Volumes = append(b.Volumes, values[i]) - } - return b -} - -// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostNetwork field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostNetwork = &value - return b -} - -// WithHostPorts adds the given value to the HostPorts field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the HostPorts field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithHostPorts") - } - b.HostPorts = append(b.HostPorts, *values[i]) - } - return b -} - -// WithHostPID sets the HostPID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostPID field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostPID = &value - return b -} - -// WithHostIPC sets the HostIPC field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the HostIPC field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.HostIPC = &value - return b -} - -// WithSELinux sets the SELinux field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinux field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SELinux = value - return b -} - -// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsUser field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsUser = value - return b -} - -// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RunAsGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RunAsGroup = value - return b -} - -// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SupplementalGroups field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.SupplementalGroups = value - return b -} - -// WithFSGroup sets the FSGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the FSGroup field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.FSGroup = value - return b -} - -// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.ReadOnlyRootFilesystem = &value - return b -} - -// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. 
-func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.DefaultAllowPrivilegeEscalation = &value - return b -} - -// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { - b.AllowPrivilegeEscalation = &value - return b -} - -// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedHostPaths") - } - b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) - } - return b -} - -// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedFlexVolumes") - } - b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) - } - return b -} - -// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAllowedCSIDrivers") - } - b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) - } - return b -} - -// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) - } - return b -} - -// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) - } - return b -} - -// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. -func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { - for i := range values { - b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) - } - return b -} - -// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the RuntimeClass field is set to the value of the last call. -func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { - b.RuntimeClass = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go deleted file mode 100644 index fcfcfbe6b..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use -// with apply. -type RunAsGroupStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with -// apply. -func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { - return &RunAsGroupStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. 
-func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go deleted file mode 100644 index a6d6ee58e..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use -// with apply. -type RunAsUserStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with -// apply. -func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { - return &RunAsUserStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. 
-func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go deleted file mode 100644 index c19a7ce61..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use -// with apply. -type RuntimeClassStrategyOptionsApplyConfiguration struct { - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` -} - -// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with -// apply. -func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { - return &RuntimeClassStrategyOptionsApplyConfiguration{} -} - -// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { - for i := range values { - b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) - } - return b -} - -// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. -func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { - b.DefaultRuntimeClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go deleted file mode 100644 index de7ede618..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use -// with apply. -type SELinuxStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` - SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with -// apply. -func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { - return &SELinuxStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SELinuxOptions field is set to the value of the last call. -func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { - b.SELinuxOptions = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go deleted file mode 100644 index 9e4a9bb2c..000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" -) - -// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use -// with apply. -type SupplementalGroupsStrategyOptionsApplyConfiguration struct { - Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` - Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with -// apply. -func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { - return &SupplementalGroupsStrategyOptionsApplyConfiguration{} -} - -// WithRule sets the Rule field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Rule field is set to the value of the last call. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { - b.Rule = &value - return b -} - -// WithRanges adds the given value to the Ranges field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ranges field. -func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRanges") - } - b.Ranges = append(b.Ranges, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go index fda9205c2..5ae4dc37f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. 
func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go index 1b4b51596..c5b0075ec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithSelfLink(value string) *ClusterRoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *Cluste return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithClusterName(value string) *ClusterRoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ClusterRoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -277,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go index d17fc6e55..91a9d5df3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithSelfLink(value string) *ClusterRoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithClusterName(value string) *ClusterRoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ClusterRoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -277,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go index 65ee1d4fe..a2e66d109 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go index 854441bbd..b51f90426 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithSelfLink(value string) *RoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConf return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *RoleApplyConfiguration) WithClusterName(value string) *RoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -270,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go index 88075a70d..e59c8e6d3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithSelfLink(value string) *RoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBi return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *RoleBindingApplyConfiguration) WithClusterName(value string) *RoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -279,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go index ef03a4882..646a3bb19 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go index ebc87fdc4..e1d9c5cfb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go index 63cdc3fcc..ff4aeb59e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. 
type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go index ae9cfc474..dc0e34e53 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithSelfLink(value string) *ClusterRoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *Cluste return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithClusterName(value string) *ClusterRoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ClusterRoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -277,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go index f5a2c03bb..d3c12ec50 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithSelfLink(value string) *ClusterRoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithClusterName(value string) *ClusterRoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ClusterRoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -277,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go index 12143af13..89d7a2914 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go index ec5bebcec..db0a4f716 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithSelfLink(value string) *RoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConf return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithClusterName(value string) *RoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -270,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go index 930f9489f..8efcddd69 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithSelfLink(value string) *RoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBi return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *RoleBindingApplyConfiguration) WithClusterName(value string) *RoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -279,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go index 40dbc3307..4b2553117 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go index 46640dbbe..665b42af5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. 
func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go index d52ac3db9..e9bb68dcb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go index 1574f8f66..5e9c23854 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithSelfLink(value string) *ClusterRoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -242,15 +233,6 @@ func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *Cluste return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithClusterName(value string) *ClusterRoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ClusterRoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -277,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go index 152a23b49..2f088b93e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithSelfLink(value string) *ClusterRoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
@@ -242,15 +233,6 @@ func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithClusterName(value string) *ClusterRoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *ClusterRoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -277,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go index c63dc68c6..dc630df20 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go index dc6ff2a40..4b1b6112b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. 
func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -125,15 +125,6 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithSelfLink(value string) *RoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -243,15 +234,6 @@ func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConf return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithClusterName(value string) *RoleApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -270,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go index aeef6ec8f..246928553 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -126,15 +126,6 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithSelfLink(value string) *RoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -244,15 +235,6 @@ func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBi return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithClusterName(value string) *RoleBindingApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *RoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -279,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go index e6a02dc60..19d0420a8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go index b616da8b1..f7c1a21a9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go new file mode 100644 index 000000000..3090b2f9d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use +// with apply. +type AllocationResultApplyConfiguration struct { + Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + Controller *string `json:"controller,omitempty"` +} + +// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with +// apply. +func AllocationResult() *AllocationResultApplyConfiguration { + return &AllocationResultApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration { + b.Devices = value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithController sets the Controller field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Controller field is set to the value of the last call. 
+func (b *AllocationResultApplyConfiguration) WithController(value string) *AllocationResultApplyConfiguration { + b.Controller = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go new file mode 100644 index 000000000..e6b774508 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use +// with apply. +type BasicDeviceApplyConfiguration struct { + Attributes map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` + Capacity map[v1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"` +} + +// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with +// apply. +func BasicDevice() *BasicDeviceApplyConfiguration { + return &BasicDeviceApplyConfiguration{} +} + +// WithAttributes puts the entries into the Attributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Attributes field, +// overwriting an existing map entries in Attributes field with the same key. +func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Attributes == nil && len(entries) > 0 { + b.Attributes = make(map[v1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Attributes[k] = v + } + return b +} + +// WithCapacity puts the entries into the Capacity field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Capacity field, +// overwriting an existing map entries in Capacity field with the same key. 
+func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[v1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration { + if b.Capacity == nil && len(entries) > 0 { + b.Capacity = make(map[v1alpha3.QualifiedName]resource.Quantity, len(entries)) + } + for k, v := range entries { + b.Capacity[k] = v + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go new file mode 100644 index 000000000..c59b6a2e3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use +// with apply. +type CELDeviceSelectorApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with +// apply. +func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration { + return &CELDeviceSelectorApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration { + b.Expression = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go new file mode 100644 index 000000000..efdb5f37a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceApplyConfiguration represents a declarative configuration of the Device type for use +// with apply. 
+type DeviceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"` +} + +// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with +// apply. +func Device() *DeviceApplyConfiguration { + return &DeviceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration { + b.Name = &value + return b +} + +// WithBasic sets the Basic field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Basic field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration { + b.Basic = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go new file mode 100644 index 000000000..342e724ef --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use +// with apply. +type DeviceAllocationConfigurationApplyConfiguration struct { + Source *v1alpha3.AllocationConfigSource `json:"source,omitempty"` + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with +// apply. +func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration { + return &DeviceAllocationConfigurationApplyConfiguration{} +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. 
+func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value v1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { + b.Source = &value + return b +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go new file mode 100644 index 000000000..0cfb264b4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use +// with apply. +type DeviceAllocationResultApplyConfiguration struct { + Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"` + Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with +// apply. +func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration { + return &DeviceAllocationResultApplyConfiguration{} +} + +// WithResults adds the given value to the Results field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Results field. 
+func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResults") + } + b.Results = append(b.Results, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go new file mode 100644 index 000000000..6b0b7a40a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use +// with apply. +type DeviceAttributeApplyConfiguration struct { + IntValue *int64 `json:"int,omitempty"` + BoolValue *bool `json:"bool,omitempty"` + StringValue *string `json:"string,omitempty"` + VersionValue *string `json:"version,omitempty"` +} + +// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with +// apply. +func DeviceAttribute() *DeviceAttributeApplyConfiguration { + return &DeviceAttributeApplyConfiguration{} +} + +// WithIntValue sets the IntValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IntValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration { + b.IntValue = &value + return b +} + +// WithBoolValue sets the BoolValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BoolValue field is set to the value of the last call. 
+func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration { + b.BoolValue = &value + return b +} + +// WithStringValue sets the StringValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StringValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration { + b.StringValue = &value + return b +} + +// WithVersionValue sets the VersionValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VersionValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration { + b.VersionValue = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go new file mode 100644 index 000000000..ce3ab56d8 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use +// with apply. +type DeviceClaimApplyConfiguration struct { + Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"` + Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"` + Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with +// apply. +func DeviceClaim() *DeviceClaimApplyConfiguration { + return &DeviceClaimApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequests") + } + b.Requests = append(b.Requests, *values[i]) + } + return b +} + +// WithConstraints adds the given value to the Constraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Constraints field. 
+func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConstraints") + } + b.Constraints = append(b.Constraints, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go new file mode 100644 index 000000000..4cabe9859 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use +// with apply. +type DeviceClaimConfigurationApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with +// apply. +func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration { + return &DeviceClaimConfigurationApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. 
+func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go new file mode 100644 index 000000000..abaadbb36 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use +// with apply. +type DeviceClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"` +} + +// DeviceClass constructs a declarative configuration of the DeviceClass type for use with +// apply. +func DeviceClass(name string) *DeviceClassApplyConfiguration { + b := &DeviceClassApplyConfiguration{} + b.WithName(name) + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractDeviceClass extracts the applied configuration owned by fieldManager from +// deviceClass. If no managedFields are found in deviceClass for fieldManager, a +// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API. +// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "") +} + +// ExtractDeviceClassStatus is the same as ExtractDeviceClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDeviceClassStatus(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "status") +} + +func extractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) { + b := &DeviceClassApplyConfiguration{} + err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha3.DeviceClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(deviceClass.Name) + + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeviceClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go new file mode 100644 index 000000000..cb3758a3e --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use +// with apply. +type DeviceClassConfigurationApplyConfiguration struct { + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with +// apply. +func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration { + return &DeviceClassConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go new file mode 100644 index 000000000..d40a43de6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use +// with apply. +type DeviceClassSpecApplyConfiguration struct { + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` + SuitableNodes *v1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` +} + +// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with +// apply. 
+func DeviceClassSpec() *DeviceClassSpecApplyConfiguration { + return &DeviceClassSpecApplyConfiguration{} +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} + +// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SuitableNodes field is set to the value of the last call. +func (b *DeviceClassSpecApplyConfiguration) WithSuitableNodes(value *v1.NodeSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { + b.SuitableNodes = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go new file mode 100644 index 000000000..62c0d997d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use +// with apply. +type DeviceConfigurationApplyConfiguration struct { + Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"` +} + +// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with +// apply. +func DeviceConfiguration() *DeviceConfigurationApplyConfiguration { + return &DeviceConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go new file mode 100644 index 000000000..479acd57c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use +// with apply. +type DeviceConstraintApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + MatchAttribute *v1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"` +} + +// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with +// apply. +func DeviceConstraint() *DeviceConstraintApplyConfiguration { + return &DeviceConstraintApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchAttribute field is set to the value of the last call. +func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value v1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration { + b.MatchAttribute = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go new file mode 100644 index 000000000..e5c87efe4 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use +// with apply. +type DeviceRequestApplyConfiguration struct { + Name *string `json:"name,omitempty"` + DeviceClassName *string `json:"deviceClassName,omitempty"` + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + AllocationMode *resourcev1alpha3.DeviceAllocationMode `json:"allocationMode,omitempty"` + Count *int64 `json:"count,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with +// apply. +func DeviceRequest() *DeviceRequestApplyConfiguration { + return &DeviceRequestApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration { + b.Name = &value + return b +} + +// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeviceClassName field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration { + b.DeviceClassName = &value + return b +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocationMode field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1alpha3.DeviceAllocationMode) *DeviceRequestApplyConfiguration { + b.AllocationMode = &value + return b +} + +// WithCount sets the Count field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Count field is set to the value of the last call. 
+func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration { + b.Count = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go new file mode 100644 index 000000000..712b9bf9b --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use +// with apply. +type DeviceRequestAllocationResultApplyConfiguration struct { + Request *string `json:"request,omitempty"` + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` +} + +// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with +// apply. +func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration { + return &DeviceRequestAllocationResultApplyConfiguration{} +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Request = &value + return b +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. 
+func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Device = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go new file mode 100644 index 000000000..574299d15 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use +// with apply. +type DeviceSelectorApplyConfiguration struct { + CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"` +} + +// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with +// apply. +func DeviceSelector() *DeviceSelectorApplyConfiguration { + return &DeviceSelectorApplyConfiguration{} +} + +// WithCEL sets the CEL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CEL field is set to the value of the last call. +func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration { + b.CEL = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go new file mode 100644 index 000000000..caf9d059c --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use +// with apply. +type OpaqueDeviceConfigurationApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Parameters *runtime.RawExtension `json:"parameters,omitempty"` +} + +// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with +// apply. +func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration { + return &OpaqueDeviceConfigurationApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration { + b.Driver = &value + return b +} + +// WithParameters sets the Parameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Parameters field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration { + b.Parameters = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go new file mode 100644 index 000000000..ee8e73ebe --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go @@ -0,0 +1,264 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use +// with apply. +type PodSchedulingContextApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` +} + +// PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with +// apply. 
+func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { + b := &PodSchedulingContextApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from +// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a +// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. +// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") +} + +// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { + return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") +} + +func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { + b := &PodSchedulingContextApplyConfiguration{} + err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha3.PodSchedulingContext"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(podSchedulingContext.Name) + b.WithNamespace(podSchedulingContext.Namespace) + + b.WithKind("PodSchedulingContext") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodSchedulingContextApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go new file mode 100644 index 000000000..fd25df7a5 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use +// with apply. 
+type PodSchedulingContextSpecApplyConfiguration struct { + SelectedNode *string `json:"selectedNode,omitempty"` + PotentialNodes []string `json:"potentialNodes,omitempty"` +} + +// PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with +// apply. +func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { + return &PodSchedulingContextSpecApplyConfiguration{} +} + +// WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelectedNode field is set to the value of the last call. +func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { + b.SelectedNode = &value + return b +} + +// WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PotentialNodes field. +func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { + for i := range values { + b.PotentialNodes = append(b.PotentialNodes, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go new file mode 100644 index 000000000..a06e370cc --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go @@ -0,0 +1,44 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use +// with apply. +type PodSchedulingContextStatusApplyConfiguration struct { + ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` +} + +// PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with +// apply. +func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { + return &PodSchedulingContextStatusApplyConfiguration{} +} + +// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceClaims field. 
+func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceClaims") + } + b.ResourceClaims = append(b.ResourceClaims, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go new file mode 100644 index 000000000..616159558 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go @@ -0,0 +1,264 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use +// with apply. +type ResourceClaimApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` + Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"` +} + +// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with +// apply. +func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { + b := &ResourceClaimApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractResourceClaim extracts the applied configuration owned by fieldManager from +// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a +// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClaim must be a unmodified ResourceClaim API object that was retrieved from the Kubernetes API. +// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "") +} + +// ExtractResourceClaimStatus is the same as ExtractResourceClaim except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "status") +} + +func extractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { + b := &ResourceClaimApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaim"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaim.Name) + b.WithNamespace(resourceClaim.Namespace) + + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go new file mode 100644 index 000000000..96196d7c9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use +// with apply. +type ResourceClaimConsumerReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Resource *string `json:"resource,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with +// apply. +func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { + return &ResourceClaimConsumerReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithResource(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithName(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithUID(value types.UID) *ResourceClaimConsumerReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go new file mode 100644 index 000000000..caab89acd --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourceClaimSchedulingStatusApplyConfiguration represents a declarative configuration of the ResourceClaimSchedulingStatus type for use +// with apply. +type ResourceClaimSchedulingStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + UnsuitableNodes []string `json:"unsuitableNodes,omitempty"` +} + +// ResourceClaimSchedulingStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimSchedulingStatus type for use with +// apply. +func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration { + return &ResourceClaimSchedulingStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithName(value string) *ResourceClaimSchedulingStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithUnsuitableNodes adds the given value to the UnsuitableNodes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the UnsuitableNodes field. 
+func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithUnsuitableNodes(values ...string) *ResourceClaimSchedulingStatusApplyConfiguration { + for i := range values { + b.UnsuitableNodes = append(b.UnsuitableNodes, values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go new file mode 100644 index 000000000..7c5b65681 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use +// with apply. +type ResourceClaimSpecApplyConfiguration struct { + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` + Controller *string `json:"controller,omitempty"` +} + +// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with +// apply. +func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { + return &ResourceClaimSpecApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration { + b.Devices = value + return b +} + +// WithController sets the Controller field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Controller field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithController(value string) *ResourceClaimSpecApplyConfiguration { + b.Controller = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go new file mode 100644 index 000000000..a52af3ec3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use +// with apply. +type ResourceClaimStatusApplyConfiguration struct { + Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` + ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` + DeallocationRequested *bool `json:"deallocationRequested,omitempty"` +} + +// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with +// apply. +func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { + return &ResourceClaimStatusApplyConfiguration{} +} + +// WithAllocation sets the Allocation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Allocation field is set to the value of the last call. +func (b *ResourceClaimStatusApplyConfiguration) WithAllocation(value *AllocationResultApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + b.Allocation = value + return b +} + +// WithReservedFor adds the given value to the ReservedFor field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ReservedFor field. +func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*ResourceClaimConsumerReferenceApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithReservedFor") + } + b.ReservedFor = append(b.ReservedFor, *values[i]) + } + return b +} + +// WithDeallocationRequested sets the DeallocationRequested field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeallocationRequested field is set to the value of the last call. +func (b *ResourceClaimStatusApplyConfiguration) WithDeallocationRequested(value bool) *ResourceClaimStatusApplyConfiguration { + b.DeallocationRequested = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go new file mode 100644 index 000000000..6f371d0c0 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go @@ -0,0 +1,255 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use +// with apply. +type ResourceClaimTemplateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with +// apply. +func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration { + b := &ResourceClaimTemplateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from +// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a +// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API. +// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") +} + +// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") +} + +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { + b := &ResourceClaimTemplateApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaimTemplate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaimTemplate.Name) + b.WithNamespace(resourceClaimTemplate.Namespace) + + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go new file mode 100644 index 000000000..5b03ab755 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go @@ -0,0 +1,194 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use +// with apply. +type ResourceClaimTemplateSpecApplyConfiguration struct { + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with +// apply. +func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { + return &ResourceClaimTemplateSpecApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go new file mode 100644 index 000000000..23825d137 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use +// with apply. +type ResourcePoolApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"` +} + +// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with +// apply. +func ResourcePool() *ResourcePoolApplyConfiguration { + return &ResourcePoolApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration { + b.Name = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration { + b.Generation = &value + return b +} + +// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceSliceCount field is set to the value of the last call. 
+func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration { + b.ResourceSliceCount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go new file mode 100644 index 000000000..aaad68612 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use +// with apply. +type ResourceSliceApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with +// apply. +func ResourceSlice(name string) *ResourceSliceApplyConfiguration { + b := &ResourceSliceApplyConfiguration{} + b.WithName(name) + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractResourceSlice extracts the applied configuration owned by fieldManager from +// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a +// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API. +// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "") +} + +// ExtractResourceSliceStatus is the same as ExtractResourceSlice except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "status") +} + +func extractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { + b := &ResourceSliceApplyConfiguration{} + err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceSlice"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceSlice.Name) + + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go new file mode 100644 index 000000000..2ded75907 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use +// with apply. +type ResourceSliceSpecApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + AllNodes *bool `json:"allNodes,omitempty"` + Devices []DeviceApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with +// apply. +func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration { + return &ResourceSliceSpecApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithNodeName sets the NodeName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeName field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration { + b.NodeName = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithAllNodes sets the AllNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllNodes field is set to the value of the last call. 
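The generated builders for resource.k8s.io/v1alpha3 compose: a ResourceSlice apply configuration embeds a ResourceSliceSpec, which in turn references a ResourcePool. A hedged, editorial sketch of chaining them (the driver, node, and pool values are invented for illustration):

import (
	resourcev1alpha3ac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// exampleResourceSlice builds a declarative ResourceSlice by nesting the
// generated With* builders; all names and numbers are placeholders.
func exampleResourceSlice() *resourcev1alpha3ac.ResourceSliceApplyConfiguration {
	return resourcev1alpha3ac.ResourceSlice("example-slice").
		WithSpec(resourcev1alpha3ac.ResourceSliceSpec().
			WithDriver("dra.example.com").
			WithNodeName("node-1").
			WithPool(resourcev1alpha3ac.ResourcePool().
				WithName("pool-a").
				WithGeneration(1).
				WithResourceSliceCount(1)))
}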
+func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration { + b.AllNodes = &value + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. +func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go index 5d528ea7c..f2f135abc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PriorityClassApplyConfiguration) WithSelfLink(value string) *PriorityClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *Prio return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *PriorityClassApplyConfiguration) WithClusterName(value string) *PriorityClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PriorityClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -291,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go index 2b2aac316..098517675 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PriorityClassApplyConfiguration) WithSelfLink(value string) *PriorityClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *Prio return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *PriorityClassApplyConfiguration) WithClusterName(value string) *PriorityClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PriorityClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -291,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go index 14b1feea6..075862fe3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -127,15 +127,6 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *PriorityClassApplyConfiguration) WithSelfLink(value string) *PriorityClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -245,15 +236,6 @@ func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *Prio return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *PriorityClassApplyConfiguration) WithClusterName(value string) *PriorityClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *PriorityClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -291,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go index cf9073a0f..39d835702 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct { Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CSIDriverApplyConfiguration) WithSelfLink(value string) *CSIDriverApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDrive return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *CSIDriverApplyConfiguration) WithClusterName(value string) *CSIDriverApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CSIDriverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -263,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go index 1dc17ce96..b2dcb0fee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/storage/v1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` @@ -32,9 +32,10 @@ type CSIDriverSpecApplyConfiguration struct { FSGroupPolicy *v1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` RequiresRepublish *bool `json:"requiresRepublish,omitempty"` + SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} @@ -102,3 +103,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithRequiresRepublish(value bool) *CSI b.RequiresRepublish = &value return b } + +// WithSELinuxMount sets the SELinuxMount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxMount field is set to the value of the last call. +func (b *CSIDriverSpecApplyConfiguration) WithSELinuxMount(value bool) *CSIDriverSpecApplyConfiguration { + b.SELinuxMount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go index e65582d89..8a53e7984 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. 
type CSINodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct { Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithSelfLink(value string) *CSINodeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithClusterName(value string) *CSINodeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CSINodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -263,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go index 6219ef115..8c69e435e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. 
func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go index 30d1d4546..21d3ba7cc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go new file mode 100644 index 000000000..0e293248d --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go @@ -0,0 +1,283 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + storagev1 "k8s.io/api/storage/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use +// with apply. +type CSIStorageCapacityApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + Capacity *resource.Quantity `json:"capacity,omitempty"` + MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` +} + +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with +// apply. 
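Because CSIStorageCapacity is namespaced and carries resource.Quantity fields, building and applying it reads slightly differently from the cluster-scoped types above. A minimal sketch, editorial and not part of the vendored diff, assuming a typed clientset cs, the "kube-mgmt" field manager, and made-up storage-class and zone names:

import (
	"context"

	resource "k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// applyCapacity publishes an example capacity object for one zone; the
// storage class, zone label, and quantity are placeholders.
func applyCapacity(ctx context.Context, cs kubernetes.Interface) error {
	capacity := resource.MustParse("100Gi")
	ac := storagev1ac.CSIStorageCapacity("fast-zone-a", "default").
		WithStorageClassName("fast").
		WithCapacity(capacity).
		WithNodeTopology(metav1ac.LabelSelector().
			WithMatchLabels(map[string]string{"topology.kubernetes.io/zone": "zone-a"}))
	_, err := cs.StorageV1().CSIStorageCapacities("default").Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "kube-mgmt"})
	return err
}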
+func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { + b := &CSIStorageCapacityApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("CSIStorageCapacity") + b.WithAPIVersion("storage.k8s.io/v1") + return b +} + +// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from +// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a +// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// cSIStorageCapacity must be a unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API. +// ExtractCSIStorageCapacity provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { + return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "") +} + +// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { + return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status") +} + +func extractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { + b := &CSIStorageCapacityApplyConfiguration{} + err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1.CSIStorageCapacity"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(cSIStorageCapacity.Name) + b.WithNamespace(cSIStorageCapacity.Namespace) + + b.WithKind("CSIStorageCapacity") + b.WithAPIVersion("storage.k8s.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
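The Extract helpers above enable the extract/modify-in-place/apply workflow their doc comments describe. A minimal sketch of that flow under assumed names (the clientset, field manager string, and helper function are illustrative, not part of this change):

package example

import (
	"context"

	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storageapply "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// bumpMaximumVolumeSize extracts the fields owned by fieldManager from an
// existing CSIStorageCapacity, adjusts one of them, and re-applies the result
// with server-side apply. All names are illustrative.
func bumpMaximumVolumeSize(ctx context.Context, cs kubernetes.Interface, csc *storagev1.CSIStorageCapacity) error {
	const fieldManager = "example-manager" // hypothetical field manager name

	// Pull out only what this manager previously applied.
	cfg, err := storageapply.ExtractCSIStorageCapacity(csc, fieldManager)
	if err != nil {
		return err
	}

	// Modify the extracted configuration in place.
	cfg.WithMaximumVolumeSize(resource.MustParse("10Gi"))

	// Re-apply; fields owned by other managers are left untouched.
	_, err = cs.StorageV1().CSIStorageCapacities(csc.Namespace).
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: fieldManager})
	return err
}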
+// If called multiple times, the Name field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *CSIStorageCapacityApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithNodeTopology sets the NodeTopology field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeTopology field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithNodeTopology(value *v1.LabelSelectorApplyConfiguration) *CSIStorageCapacityApplyConfiguration { + b.NodeTopology = value + return b +} + +// WithStorageClassName sets the StorageClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StorageClassName field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithStorageClassName(value string) *CSIStorageCapacityApplyConfiguration { + b.StorageClassName = &value + return b +} + +// WithCapacity sets the Capacity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Capacity field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithCapacity(value resource.Quantity) *CSIStorageCapacityApplyConfiguration { + b.Capacity = &value + return b +} + +// WithMaximumVolumeSize sets the MaximumVolumeSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaximumVolumeSize field is set to the value of the last call. +func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resource.Quantity) *CSIStorageCapacityApplyConfiguration { + b.MaximumVolumeSize = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go index 2df999c24..26d70bc8b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go @@ -29,7 +29,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. 
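With the constructor and the chained With setters of the new storage/v1 CSIStorageCapacity file in place above, the builder can be used directly for server-side apply. A hedged sketch, assuming a typed clientset; the names, namespace, and quantities are placeholders:

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metaapply "k8s.io/client-go/applyconfigurations/meta/v1"
	storageapply "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// applyCapacity declares a CSIStorageCapacity for one topology segment and
// applies it with server-side apply.
func applyCapacity(ctx context.Context, cs kubernetes.Interface) error {
	cfg := storageapply.CSIStorageCapacity("example-capacity", "kube-system").
		WithStorageClassName("fast-ssd").
		WithNodeTopology(metaapply.LabelSelector().
			WithMatchLabels(map[string]string{"topology.kubernetes.io/zone": "zone-a"})).
		WithCapacity(resource.MustParse("100Gi")).
		WithMaximumVolumeSize(resource.MustParse("20Gi"))

	_, err := cs.StorageV1().CSIStorageCapacities("kube-system").
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}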
type StorageClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct { AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -131,15 +131,6 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithSelfLink(value string) *StorageClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -249,15 +240,6 @@ func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *Stora return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithClusterName(value string) *StorageClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *StorageClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -332,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go index 6665a1ff2..77b96db2f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. 
type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go index 5fd3d4d8e..72c351208 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithSelfLink(value string) *VolumeAttachmentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *V return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *VolumeAttachmentApplyConfiguration) WithClusterName(value string) *VolumeAttachmentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *VolumeAttachmentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go index 2bf3f7720..477855398 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go index a55f7c8ea..896539235 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. 
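The GetName accessors added to these builders make it possible to treat different apply configurations uniformly when only the target name matters. The interface and helper below are hypothetical illustrations, not client-go API:

package example

import (
	"fmt"

	storageapply "k8s.io/client-go/applyconfigurations/storage/v1"
)

// named is satisfied by any apply configuration that now exposes the
// generated GetName accessor; the interface itself is hypothetical.
type named interface {
	GetName() *string
}

// describe reports the name an apply configuration will be applied under,
// guarding against builders that were never given a name.
func describe(obj named) string {
	if name := obj.GetName(); name != nil {
		return "apply target: " + *name
	}
	return "apply target: <unnamed>"
}

func exampleUsage() {
	sc := storageapply.StorageClass("fast-ssd")     // name set by the constructor
	va := storageapply.VolumeAttachment("attach-1") // likewise
	fmt.Println(describe(sc), describe(va))
}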
func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go index 015b08e6e..14293376d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go index 4bf829f8a..039e5f32b 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go index 3c5fd3dc2..735853c48 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. 
type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go index a7ad0717b..aa949e28c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -129,15 +129,6 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CSIStorageCapacityApplyConfiguration) WithSelfLink(value string) *CSIStorageCapacityApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -247,15 +238,6 @@ func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *CSIStorageCapacityApplyConfiguration) WithClusterName(value string) *CSIStorageCapacityApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CSIStorageCapacityApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -293,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go index 7a3091964..9648621ac 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithSelfLink(value string) *VolumeAttachmentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *V return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. 
-func (b *VolumeAttachmentApplyConfiguration) WithClusterName(value string) *VolumeAttachmentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *VolumeAttachmentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go index 82872cc35..be7da5dd1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go index 2710ff886..e97487a64 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. 
func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go index 43803496e..a287fc6b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go new file mode 100644 index 000000000..f95bc5547 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go @@ -0,0 +1,268 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use +// with apply. +type VolumeAttributesClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DriverName *string `json:"driverName,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` +} + +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with +// apply. 
+func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { + b := &VolumeAttributesClassApplyConfiguration{} + b.WithName(name) + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1alpha1") + return b +} + +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. +// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") +} + +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") +} + +func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { + b := &VolumeAttributesClassApplyConfiguration{} + err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(volumeAttributesClass.Name) + + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParameters puts the entries into the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Parameters field, +// overwriting an existing map entries in Parameters field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + if b.Parameters == nil && len(entries) > 0 { + b.Parameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Parameters[k] = v + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go index cbff16fd0..ef8f6bbe6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. 
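The new v1alpha1 VolumeAttributesClass builder follows the same fluent pattern. A minimal sketch, assuming the v1alpha1 typed client is available and the alpha API is enabled on the cluster; the class name, driver name, and parameters are placeholders:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagealphaapply "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

// applyVolumeAttributesClass declares a hypothetical "gold" class whose
// parameters a CSI driver can map to attributes of provisioned volumes.
func applyVolumeAttributesClass(ctx context.Context, cs kubernetes.Interface) error {
	vac := storagealphaapply.VolumeAttributesClass("gold").
		WithDriverName("csi.example.com").
		WithParameters(map[string]string{
			"iops":       "8000",
			"throughput": "500",
		})

	// VolumeAttributesClass is cluster-scoped, so no namespace is passed.
	_, err := cs.StorageV1alpha1().VolumeAttributesClasses().
		Apply(ctx, vac, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}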
type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go index 4ff0fcdf1..b9a807bd8 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct { Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CSIDriverApplyConfiguration) WithSelfLink(value string) *CSIDriverApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDrive return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *CSIDriverApplyConfiguration) WithClusterName(value string) *CSIDriverApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CSIDriverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -263,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go index 1d943cbff..5f4e068f0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go @@ -22,7 +22,7 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` @@ -32,9 +32,10 @@ type CSIDriverSpecApplyConfiguration struct { FSGroupPolicy *v1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` RequiresRepublish *bool `json:"requiresRepublish,omitempty"` + SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} @@ -102,3 +103,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithRequiresRepublish(value bool) *CSI b.RequiresRepublish = &value return b } + +// WithSELinuxMount sets the SELinuxMount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxMount field is set to the value of the last call. +func (b *CSIDriverSpecApplyConfiguration) WithSELinuxMount(value bool) *CSIDriverSpecApplyConfiguration { + b.SELinuxMount = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go index fce97b456..af0f41cf0 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct { Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. 
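Beyond the comment cleanups, the functional change to csidriverspec.go is the new SELinuxMount field and its setter. A hedged sketch of building a v1beta1 CSIDriver configuration that opts in; the driver name is a placeholder, and the field only takes effect when the corresponding SELinux mount feature gate is enabled server-side:

package example

import (
	storagebetaapply "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

// newCSIDriverConfig builds a v1beta1 CSIDriver apply configuration that
// advertises SELinux-aware mounting via the newly vendored WithSELinuxMount.
func newCSIDriverConfig() *storagebetaapply.CSIDriverApplyConfiguration {
	return storagebetaapply.CSIDriver("csi.example.com").
		WithSpec(storagebetaapply.CSIDriverSpec().
			WithAttachRequired(true).
			WithSELinuxMount(true))
}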
func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -123,15 +123,6 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithSelfLink(value string) *CSINodeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -241,15 +232,6 @@ func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApp return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithClusterName(value string) *CSINodeApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CSINodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -263,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go index 2c7de497b..65ad771bb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. 
func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go index 94ff1b461..c9cbea1d9 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go index afd2e2a9e..19350e5a6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -129,15 +129,6 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *CSIStorageCapacityApplyConfiguration) WithSelfLink(value string) *CSIStorageCapacityApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
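Reviewer note: the generated builders in these files all follow the same chaining pattern, and this update adds a GetName accessor alongside the existing With* setters. A minimal usage sketch is below; it only uses constructors and setters visible in this diff (CSIStorageCapacity, WithLabels, WithMaximumVolumeSize, GetName), and the name, namespace, labels and size are illustrative values, not part of the change.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

func main() {
	// Assemble a CSIStorageCapacity apply configuration by chaining "With" calls;
	// each setter stores a pointer to its argument and returns the receiver.
	capacity := storagev1beta1.CSIStorageCapacity("example-capacity", "default").
		WithLabels(map[string]string{"app": "demo"}).
		WithMaximumVolumeSize(resource.MustParse("10Gi"))

	// GetName, newly generated in this update, returns the metadata name that
	// the constructor populated via WithName.
	if name := capacity.GetName(); name != nil {
		fmt.Println("configured name:", *name) // prints "example-capacity"
	}
}
```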
@@ -247,15 +238,6 @@ func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *CSIStorageCapacityApplyConfiguration) WithClusterName(value string) *CSIStorageCapacityApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *CSIStorageCapacityApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -293,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go index a4b924ee8..fa504a44e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go @@ -29,7 +29,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. type StorageClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -43,7 +43,7 @@ type StorageClassApplyConfiguration struct { AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -131,15 +131,6 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithSelfLink(value string) *StorageClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
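For reviewers unfamiliar with server-side apply, the sketch below shows how a StorageClass apply configuration such as the one above is normally consumed: build it with chained setters, then pass it to the typed client's Apply method under a field manager. The labels, provisioner string and field-manager name are illustrative, and WithProvisioner is assumed from the upstream StorageClass type rather than shown in this diff.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storageac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyStorageClass builds a declarative StorageClass configuration and asks
// the API server to perform a server-side apply owned by the given field manager.
func applyStorageClass(ctx context.Context, cs kubernetes.Interface) error {
	sc := storageac.StorageClass("example-fast").
		WithLabels(map[string]string{"tier": "fast"}).
		WithProvisioner("example.com/csi-driver") // assumed setter for the Provisioner field

	_, err := cs.StorageV1beta1().StorageClasses().Apply(ctx, sc, metav1.ApplyOptions{
		FieldManager: "kube-mgmt-example",
		Force:        true,
	})
	return err
}
```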
@@ -249,15 +240,6 @@ func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *Stora return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithClusterName(value string) *StorageClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *StorageClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -332,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go index 89c99d560..e0f2df28e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go index 553bfee98..b0711d731 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. 
func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -124,15 +124,6 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume return b } -// WithSelfLink sets the SelfLink field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelfLink field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithSelfLink(value string) *VolumeAttachmentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.SelfLink = &value - return b -} - // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. @@ -242,15 +233,6 @@ func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *V return b } -// WithClusterName sets the ClusterName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterName field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithClusterName(value string) *VolumeAttachmentApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ClusterName = &value - return b -} - func (b *VolumeAttachmentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} @@ -272,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go index 9700b38ee..b08dd3148 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. 
func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go index 1d5e304bb..3bdaeb45d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go index fa1855a24..f7046cdb3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 000000000..7b221d277 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,268 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use +// with apply. +type VolumeAttributesClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DriverName *string `json:"driverName,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` +} + +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with +// apply. +func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { + b := &VolumeAttributesClassApplyConfiguration{} + b.WithName(name) + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b +} + +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. +// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") +} + +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") +} + +func extractVolumeAttributesClass(volumeAttributesClass *v1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { + b := &VolumeAttributesClassApplyConfiguration{} + err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(volumeAttributesClass.Name) + + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParameters puts the entries into the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Parameters field, +// overwriting an existing map entries in Parameters field with the same key. 
+func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + if b.Parameters == nil && len(entries) > 0 { + b.Parameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Parameters[k] = v + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go index 3f255fce7..fec1c9ade 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go index 4b69b64c9..b42c9decc 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go +++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go new file mode 100644 index 000000000..c8f9f009a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// GroupVersionResourceApplyConfiguration represents a declarative configuration of the GroupVersionResource type for use +// with apply. +type GroupVersionResourceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Version *string `json:"version,omitempty"` + Resource *string `json:"resource,omitempty"` +} + +// GroupVersionResourceApplyConfiguration constructs a declarative configuration of the GroupVersionResource type for use with +// apply. +func GroupVersionResource() *GroupVersionResourceApplyConfiguration { + return &GroupVersionResourceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *GroupVersionResourceApplyConfiguration) WithGroup(value string) *GroupVersionResourceApplyConfiguration { + b.Group = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *GroupVersionResourceApplyConfiguration) WithVersion(value string) *GroupVersionResourceApplyConfiguration { + b.Version = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *GroupVersionResourceApplyConfiguration) WithResource(value string) *GroupVersionResourceApplyConfiguration { + b.Resource = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go new file mode 100644 index 000000000..dcdbc60c7 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go @@ -0,0 +1,81 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use +// with apply. +type MigrationConditionApplyConfiguration struct { + Type *v1alpha1.MigrationConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with +// apply. +func MigrationCondition() *MigrationConditionApplyConfiguration { + return &MigrationConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithType(value v1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *MigrationConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastUpdateTime sets the LastUpdateTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastUpdateTime field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithLastUpdateTime(value metav1.Time) *MigrationConditionApplyConfiguration { + b.LastUpdateTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithReason(value string) *MigrationConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *MigrationConditionApplyConfiguration) WithMessage(value string) *MigrationConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go new file mode 100644 index 000000000..7e6452a77 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// StorageVersionMigrationApplyConfiguration represents a declarative configuration of the StorageVersionMigration type for use +// with apply. +type StorageVersionMigrationApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *StorageVersionMigrationSpecApplyConfiguration `json:"spec,omitempty"` + Status *StorageVersionMigrationStatusApplyConfiguration `json:"status,omitempty"` +} + +// StorageVersionMigration constructs a declarative configuration of the StorageVersionMigration type for use with +// apply. +func StorageVersionMigration(name string) *StorageVersionMigrationApplyConfiguration { + b := &StorageVersionMigrationApplyConfiguration{} + b.WithName(name) + b.WithKind("StorageVersionMigration") + b.WithAPIVersion("storagemigration.k8s.io/v1alpha1") + return b +} + +// ExtractStorageVersionMigration extracts the applied configuration owned by fieldManager from +// storageVersionMigration. If no managedFields are found in storageVersionMigration for fieldManager, a +// StorageVersionMigrationApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// storageVersionMigration must be a unmodified StorageVersionMigration API object that was retrieved from the Kubernetes API. +// ExtractStorageVersionMigration provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractStorageVersionMigration(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) { + return extractStorageVersionMigration(storageVersionMigration, fieldManager, "") +} + +// ExtractStorageVersionMigrationStatus is the same as ExtractStorageVersionMigration except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractStorageVersionMigrationStatus(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) { + return extractStorageVersionMigration(storageVersionMigration, fieldManager, "status") +} + +func extractStorageVersionMigration(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string, subresource string) (*StorageVersionMigrationApplyConfiguration, error) { + b := &StorageVersionMigrationApplyConfiguration{} + err := managedfields.ExtractInto(storageVersionMigration, internal.Parser().Type("io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(storageVersionMigration.Name) + + b.WithKind("StorageVersionMigration") + b.WithAPIVersion("storagemigration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *StorageVersionMigrationApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string) *StorageVersionMigrationApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *StorageVersionMigrationApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *StorageVersionMigrationApplyConfiguration) WithFinalizers(values ...string) *StorageVersionMigrationApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *StorageVersionMigrationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *StorageVersionMigrationApplyConfiguration) WithSpec(value *StorageVersionMigrationSpecApplyConfiguration) *StorageVersionMigrationApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *StorageVersionMigrationApplyConfiguration) WithStatus(value *StorageVersionMigrationStatusApplyConfiguration) *StorageVersionMigrationApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageVersionMigrationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go new file mode 100644 index 000000000..02ddb540f --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// StorageVersionMigrationSpecApplyConfiguration represents a declarative configuration of the StorageVersionMigrationSpec type for use +// with apply. +type StorageVersionMigrationSpecApplyConfiguration struct { + Resource *GroupVersionResourceApplyConfiguration `json:"resource,omitempty"` + ContinueToken *string `json:"continueToken,omitempty"` +} + +// StorageVersionMigrationSpecApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationSpec type for use with +// apply. +func StorageVersionMigrationSpec() *StorageVersionMigrationSpecApplyConfiguration { + return &StorageVersionMigrationSpecApplyConfiguration{} +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *StorageVersionMigrationSpecApplyConfiguration) WithResource(value *GroupVersionResourceApplyConfiguration) *StorageVersionMigrationSpecApplyConfiguration { + b.Resource = value + return b +} + +// WithContinueToken sets the ContinueToken field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContinueToken field is set to the value of the last call. +func (b *StorageVersionMigrationSpecApplyConfiguration) WithContinueToken(value string) *StorageVersionMigrationSpecApplyConfiguration { + b.ContinueToken = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go new file mode 100644 index 000000000..fc957cb15 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// StorageVersionMigrationStatusApplyConfiguration represents a declarative configuration of the StorageVersionMigrationStatus type for use +// with apply. +type StorageVersionMigrationStatusApplyConfiguration struct { + Conditions []MigrationConditionApplyConfiguration `json:"conditions,omitempty"` + ResourceVersion *string `json:"resourceVersion,omitempty"` +} + +// StorageVersionMigrationStatusApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationStatus type for use with +// apply. +func StorageVersionMigrationStatus() *StorageVersionMigrationStatusApplyConfiguration { + return &StorageVersionMigrationStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *StorageVersionMigrationStatusApplyConfiguration) WithConditions(values ...*MigrationConditionApplyConfiguration) *StorageVersionMigrationStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *StorageVersionMigrationStatusApplyConfiguration) WithResourceVersion(value string) *StorageVersionMigrationStatusApplyConfiguration { + b.ResourceVersion = &value + return b +} diff --git a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go new file mode 100644 index 000000000..f5eaaedab --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go @@ -0,0 +1,278 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package discovery + +import ( + "fmt" + + apidiscovery "k8s.io/api/apidiscovery/v2" + apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// StaleGroupVersionError encasulates failed GroupVersion marked "stale" +// in the returned AggregatedDiscovery format. +type StaleGroupVersionError struct { + gv schema.GroupVersion +} + +func (s StaleGroupVersionError) Error() string { + return fmt.Sprintf("stale GroupVersion discovery: %v", s.gv) +} + +// SplitGroupsAndResources transforms "aggregated" discovery top-level structure into +// the previous "unaggregated" discovery groups and resources. +func SplitGroupsAndResources(aggregatedGroups apidiscovery.APIGroupDiscoveryList) ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Aggregated group list will contain the entirety of discovery, including + // groups, versions, and resources. GroupVersions marked "stale" are failed. + groups := []*metav1.APIGroup{} + failedGVs := map[schema.GroupVersion]error{} + resourcesByGV := map[schema.GroupVersion]*metav1.APIResourceList{} + for _, aggGroup := range aggregatedGroups.Items { + group, resources, failed := convertAPIGroup(aggGroup) + groups = append(groups, group) + for gv, resourceList := range resources { + resourcesByGV[gv] = resourceList + } + for gv, err := range failed { + failedGVs[gv] = err + } + } + // Transform slice of groups to group list before returning. + groupList := &metav1.APIGroupList{} + groupList.Groups = make([]metav1.APIGroup, 0, len(groups)) + for _, group := range groups { + groupList.Groups = append(groupList.Groups, *group) + } + return groupList, resourcesByGV, failedGVs +} + +// convertAPIGroup tranforms an "aggregated" APIGroupDiscovery to an "legacy" APIGroup, +// also returning the map of APIResourceList for resources within GroupVersions. +func convertAPIGroup(g apidiscovery.APIGroupDiscovery) ( + *metav1.APIGroup, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Iterate through versions to convert to group and resources. + group := &metav1.APIGroup{} + gvResources := map[schema.GroupVersion]*metav1.APIResourceList{} + failedGVs := map[schema.GroupVersion]error{} + group.Name = g.ObjectMeta.Name + for _, v := range g.Versions { + gv := schema.GroupVersion{Group: g.Name, Version: v.Version} + if v.Freshness == apidiscovery.DiscoveryFreshnessStale { + failedGVs[gv] = StaleGroupVersionError{gv: gv} + continue + } + version := metav1.GroupVersionForDiscovery{} + version.GroupVersion = gv.String() + version.Version = v.Version + group.Versions = append(group.Versions, version) + // PreferredVersion is first non-stale Version + if group.PreferredVersion == (metav1.GroupVersionForDiscovery{}) { + group.PreferredVersion = version + } + resourceList := &metav1.APIResourceList{} + resourceList.GroupVersion = gv.String() + for _, r := range v.Resources { + resource, err := convertAPIResource(r) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, resource) + } + // Subresources field in new format get transformed into full APIResources. + // It is possible a partial result with an error was returned to be used + // as the parent resource for the subresource. 
+ for _, subresource := range r.Subresources { + sr, err := convertAPISubresource(resource, subresource) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, sr) + } + } + } + gvResources[gv] = resourceList + } + return group, gvResources, failedGVs +} + +var emptyKind = metav1.GroupVersionKind{} + +// convertAPIResource tranforms a APIResourceDiscovery to an APIResource. We are +// resilient to missing GVK, since this resource might be the parent resource +// for a subresource. If the parent is missing a GVK, it is not returned in +// discovery, and the subresource MUST have the GVK. +func convertAPIResource(in apidiscovery.APIResourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{ + Name: in.Resource, + SingularName: in.SingularResource, + Namespaced: in.Scope == apidiscovery.ScopeNamespace, + Verbs: in.Verbs, + ShortNames: in.ShortNames, + Categories: in.Categories, + } + var err error + if in.ResponseKind != nil && (*in.ResponseKind) != emptyKind { + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + } else { + err = fmt.Errorf("discovery resource %s missing GVK", in.Resource) + } + // Can return partial result with error, which can be the parent for a + // subresource. Do not add this result to the returned discovery resources. + return result, err +} + +// convertAPISubresource tranforms a APISubresourceDiscovery to an APIResource. +func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{} + if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { + return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource) + } + result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource) + result.SingularName = parent.SingularName + result.Namespaced = parent.Namespaced + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + result.Verbs = in.Verbs + return result, nil +} + +// Please note the functions below will be removed in v1.33. They facilitate conversion +// between the deprecated type apidiscoveryv2beta1.APIGroupDiscoveryList. + +// SplitGroupsAndResourcesV2Beta1 transforms "aggregated" discovery top-level structure into +// the previous "unaggregated" discovery groups and resources. +// Deprecated: Please use SplitGroupsAndResources +func SplitGroupsAndResourcesV2Beta1(aggregatedGroups apidiscoveryv2beta1.APIGroupDiscoveryList) ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Aggregated group list will contain the entirety of discovery, including + // groups, versions, and resources. GroupVersions marked "stale" are failed. + groups := []*metav1.APIGroup{} + failedGVs := map[schema.GroupVersion]error{} + resourcesByGV := map[schema.GroupVersion]*metav1.APIResourceList{} + for _, aggGroup := range aggregatedGroups.Items { + group, resources, failed := convertAPIGroupv2beta1(aggGroup) + groups = append(groups, group) + for gv, resourceList := range resources { + resourcesByGV[gv] = resourceList + } + for gv, err := range failed { + failedGVs[gv] = err + } + } + // Transform slice of groups to group list before returning. 
+ groupList := &metav1.APIGroupList{} + groupList.Groups = make([]metav1.APIGroup, 0, len(groups)) + for _, group := range groups { + groupList.Groups = append(groupList.Groups, *group) + } + return groupList, resourcesByGV, failedGVs +} + +// convertAPIGroupv2beta1 tranforms an "aggregated" APIGroupDiscovery to an "legacy" APIGroup, +// also returning the map of APIResourceList for resources within GroupVersions. +func convertAPIGroupv2beta1(g apidiscoveryv2beta1.APIGroupDiscovery) ( + *metav1.APIGroup, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error) { + // Iterate through versions to convert to group and resources. + group := &metav1.APIGroup{} + gvResources := map[schema.GroupVersion]*metav1.APIResourceList{} + failedGVs := map[schema.GroupVersion]error{} + group.Name = g.ObjectMeta.Name + for _, v := range g.Versions { + gv := schema.GroupVersion{Group: g.Name, Version: v.Version} + if v.Freshness == apidiscoveryv2beta1.DiscoveryFreshnessStale { + failedGVs[gv] = StaleGroupVersionError{gv: gv} + continue + } + version := metav1.GroupVersionForDiscovery{} + version.GroupVersion = gv.String() + version.Version = v.Version + group.Versions = append(group.Versions, version) + // PreferredVersion is first non-stale Version + if group.PreferredVersion == (metav1.GroupVersionForDiscovery{}) { + group.PreferredVersion = version + } + resourceList := &metav1.APIResourceList{} + resourceList.GroupVersion = gv.String() + for _, r := range v.Resources { + resource, err := convertAPIResourcev2beta1(r) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, resource) + } + // Subresources field in new format get transformed into full APIResources. + // It is possible a partial result with an error was returned to be used + // as the parent resource for the subresource. + for _, subresource := range r.Subresources { + sr, err := convertAPISubresourcev2beta1(resource, subresource) + if err == nil { + resourceList.APIResources = append(resourceList.APIResources, sr) + } + } + } + gvResources[gv] = resourceList + } + return group, gvResources, failedGVs +} + +// convertAPIResource tranforms a APIResourceDiscovery to an APIResource. We are +// resilient to missing GVK, since this resource might be the parent resource +// for a subresource. If the parent is missing a GVK, it is not returned in +// discovery, and the subresource MUST have the GVK. +func convertAPIResourcev2beta1(in apidiscoveryv2beta1.APIResourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{ + Name: in.Resource, + SingularName: in.SingularResource, + Namespaced: in.Scope == apidiscoveryv2beta1.ScopeNamespace, + Verbs: in.Verbs, + ShortNames: in.ShortNames, + Categories: in.Categories, + } + // Can return partial result with error, which can be the parent for a + // subresource. Do not add this result to the returned discovery resources. + if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { + return result, fmt.Errorf("discovery resource %s missing GVK", in.Resource) + } + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + return result, nil +} + +// convertAPISubresource tranforms a APISubresourceDiscovery to an APIResource. 
+func convertAPISubresourcev2beta1(parent metav1.APIResource, in apidiscoveryv2beta1.APISubresourceDiscovery) (metav1.APIResource, error) { + result := metav1.APIResource{} + if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { + return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource) + } + result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource) + result.SingularName = parent.SingularName + result.Namespaced = parent.Namespaced + result.Group = in.ResponseKind.Group + result.Version = in.ResponseKind.Version + result.Kind = in.ResponseKind.Kind + result.Verbs = in.Verbs + return result, nil +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 50e59c5d8..ef14fee5f 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -19,7 +19,9 @@ package discovery import ( "context" "encoding/json" + goerrors "errors" "fmt" + "mime" "net/http" "net/url" "sort" @@ -29,8 +31,10 @@ import ( //nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs "github.com/golang/protobuf/proto" - openapi_v2 "github.com/googleapis/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" + apidiscoveryv2 "k8s.io/api/apidiscovery/v2" + apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -39,6 +43,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/openapi" restclient "k8s.io/client-go/rest" ) @@ -46,12 +51,29 @@ const ( // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. CustomResourceDefinitions). defaultRetries = 2 // protobuf mime type - mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" + openAPIV2mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" + // defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient. // Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist. defaultTimeout = 32 * time.Second + + // defaultBurst is the default burst to be used with the discovery client's token bucket rate limiter + defaultBurst = 300 + + AcceptV1 = runtime.ContentTypeJSON + // Aggregated discovery content-type (v2beta1). NOTE: content-type parameters + // MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient + // to ordering when comparing returned values in "Content-Type" header). + AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" + AcceptV2 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2;as=APIGroupDiscoveryList" + // Prioritize aggregated discovery by placing first in the order of discovery accept types. + acceptDiscoveryFormats = AcceptV2 + "," + AcceptV2Beta1 + "," + AcceptV1 ) +// Aggregated discovery content-type GVK. 
+var v2Beta1GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"} +var v2GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2", Kind: "APIGroupDiscoveryList"} + // DiscoveryInterface holds the methods that discover server-supported API groups, // versions and resources. type DiscoveryInterface interface { @@ -60,6 +82,20 @@ type DiscoveryInterface interface { ServerResourcesInterface ServerVersionInterface OpenAPISchemaInterface + OpenAPIV3SchemaInterface + // Returns copy of current discovery client that will only + // receive the legacy discovery format, or pointer to current + // discovery client if it does not support legacy-only discovery. + WithLegacy() DiscoveryInterface +} + +// AggregatedDiscoveryInterface extends DiscoveryInterface to include a method to possibly +// return discovery resources along with the discovery groups, which is what the newer +// aggregated discovery format does (APIGroupDiscoveryList). +type AggregatedDiscoveryInterface interface { + DiscoveryInterface + + GroupsAndMaybeResources() (*metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. @@ -90,13 +126,6 @@ type ServerGroupsInterface interface { type ServerResourcesInterface interface { // ServerResourcesForGroupVersion returns the supported resources for a group and version. ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) - // ServerResources returns the supported resources for all groups and versions. - // - // The returned resource list might be non-nil with partial results even in the case of - // non-nil error. - // - // Deprecated: use ServerGroupsAndResources instead. - ServerResources() ([]*metav1.APIResourceList, error) // ServerGroupsAndResources returns the supported groups and resources for all groups and versions. // // The returned group and resource lists might be non-nil with partial results even in the @@ -128,14 +157,22 @@ type OpenAPISchemaInterface interface { OpenAPISchema() (*openapi_v2.Document, error) } +type OpenAPIV3SchemaInterface interface { + OpenAPIV3() openapi.Client +} + // DiscoveryClient implements the functions that discover server-supported API groups, // versions and resources. type DiscoveryClient struct { restClient restclient.Interface LegacyPrefix string + // Forces the client to request only "unaggregated" (legacy) discovery. + UseLegacyDiscovery bool } +var _ AggregatedDiscoveryInterface = &DiscoveryClient{} + // Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so // group would be "". func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { @@ -153,36 +190,201 @@ func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.API return } -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. 
-func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { - // Get the groupVersions exposed at /api - v := &metav1.APIVersions{} - err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do(context.TODO()).Into(v) - apiGroup := metav1.APIGroup{} - if err == nil && len(v.Versions) != 0 { - apiGroup = apiVersionsToAPIGroup(v) - } - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err +// GroupsAndMaybeResources returns the discovery groups, and (if new aggregated +// discovery format) the resources keyed by group/version. Merges discovery groups +// and resources from /api and /apis (either aggregated or not). Legacy groups +// must be ordered first. The server will either return both endpoints (/api, /apis) +// as aggregated discovery format or legacy format. For safety, resources will only +// be returned if both endpoints returned resources. Returned "failedGVs" can be +// empty, but will only be nil in the case an error is returned. +func (d *DiscoveryClient) GroupsAndMaybeResources() ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error, + error) { + // Legacy group ordered first (there is only one -- core/v1 group). Returned groups must + // be non-nil, but it could be empty. Returned resources, apiResources map could be nil. + groups, resources, failedGVs, err := d.downloadLegacy() + if err != nil { + return nil, nil, nil, err + } + // Discovery groups and (possibly) resources downloaded from /apis. + apiGroups, apiResources, failedApisGVs, aerr := d.downloadAPIs() + if aerr != nil { + return nil, nil, nil, aerr } + // Merge apis groups into the legacy groups. + for _, group := range apiGroups.Groups { + groups.Groups = append(groups.Groups, group) + } + // For safety, only return resources if both endpoints returned resources. + if resources != nil && apiResources != nil { + for gv, resourceList := range apiResources { + resources[gv] = resourceList + } + } else if resources != nil { + resources = nil + } + // Merge failed GroupVersions from /api and /apis + for gv, err := range failedApisGVs { + failedGVs[gv] = err + } + return groups, resources, failedGVs, err +} - // Get the groupVersions exposed at /apis - apiGroupList = &metav1.APIGroupList{} - err = d.restClient.Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList) - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err +// downloadLegacy returns the discovery groups and possibly resources +// for the legacy v1 GVR at /api, or an error if one occurred. It is +// possible for the resource map to be nil if the server returned +// the unaggregated discovery. Returned "failedGVs" can be empty, but +// will only be nil in the case of a returned error. +func (d *DiscoveryClient) downloadLegacy() ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error, + error) { + accept := acceptDiscoveryFormats + if d.UseLegacyDiscovery { + accept = AcceptV1 + } + var responseContentType string + body, err := d.restClient.Get(). + AbsPath("/api"). + SetHeader("Accept", accept). + Do(context.TODO()). + ContentType(&responseContentType). + Raw() + apiGroupList := &metav1.APIGroupList{} + failedGVs := map[schema.GroupVersion]error{} + if err != nil { + // Tolerate 404, since aggregated api servers can return it. + if errors.IsNotFound(err) { + // Return empty structures and no error. 
+ emptyGVMap := map[schema.GroupVersion]*metav1.APIResourceList{} + return apiGroupList, emptyGVMap, failedGVs, nil + } else { + return nil, nil, nil, err + } + } + + var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + } else if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2beta1.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResourcesV2Beta1(aggregatedDiscovery) + } else { + // Default is unaggregated discovery v1. + var v metav1.APIVersions + err = json.Unmarshal(body, &v) + if err != nil { + return nil, nil, nil, err + } + apiGroup := metav1.APIGroup{} + if len(v.Versions) != 0 { + apiGroup = apiVersionsToAPIGroup(&v) + } + apiGroupList.Groups = []metav1.APIGroup{apiGroup} + } + + return apiGroupList, resourcesByGV, failedGVs, nil +} + +// downloadAPIs returns the discovery groups and (if aggregated format) the +// discovery resources. The returned groups will always exist, but the +// resources map may be nil. Returned "failedGVs" can be empty, but will +// only be nil in the case of a returned error. +func (d *DiscoveryClient) downloadAPIs() ( + *metav1.APIGroupList, + map[schema.GroupVersion]*metav1.APIResourceList, + map[schema.GroupVersion]error, + error) { + accept := acceptDiscoveryFormats + if d.UseLegacyDiscovery { + accept = AcceptV1 + } + var responseContentType string + body, err := d.restClient.Get(). + AbsPath("/apis"). + SetHeader("Accept", accept). + Do(context.TODO()). + ContentType(&responseContentType). + Raw() + if err != nil { + return nil, nil, nil, err + } + + apiGroupList := &metav1.APIGroupList{} + failedGVs := map[schema.GroupVersion]error{} + var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList + // Based on the content-type server responded with: aggregated or unaggregated. + if isGVK, _ := ContentTypeIsGVK(responseContentType, v2GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + } else if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK { + var aggregatedDiscovery apidiscoveryv2beta1.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResourcesV2Beta1(aggregatedDiscovery) + } else { + // Default is unaggregated discovery v1. 
+ err = json.Unmarshal(body, apiGroupList) + if err != nil { + return nil, nil, nil, err + } } - // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api - if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - apiGroupList = &metav1.APIGroupList{} + + return apiGroupList, resourcesByGV, failedGVs, nil +} + +// ContentTypeIsGVK checks of the content-type string is both +// "application/json" and matches the provided GVK. An error +// is returned if the content type string is malformed. +// NOTE: This function is resilient to the ordering of the +// content-type parameters, as well as parameters added by +// intermediaries such as proxies or gateways. Examples: +// +// ("application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil) +// ("application/json", any GVK) = (false, nil) +// ("application/json; charset=UTF-8", any GVK) = (false, nil) +// ("malformed content type string", any GVK) = (false, error) +func ContentTypeIsGVK(contentType string, gvk schema.GroupVersionKind) (bool, error) { + base, params, err := mime.ParseMediaType(contentType) + if err != nil { + return false, err } + gvkMatch := runtime.ContentTypeJSON == base && + params["g"] == gvk.Group && + params["v"] == gvk.Version && + params["as"] == gvk.Kind + return gvkMatch, nil +} - // prepend the group retrieved from /api to the list if not empty - if len(v.Versions) != 0 { - apiGroupList.Groups = append([]metav1.APIGroup{apiGroup}, apiGroupList.Groups...) +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. +func (d *DiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { + groups, _, _, err := d.GroupsAndMaybeResources() + if err != nil { + return nil, err } - return apiGroupList, nil + return groups, nil } // ServerResourcesForGroupVersion returns the supported resources for a group and version. @@ -201,8 +403,10 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r } err = d.restClient.Get().AbsPath(url.String()).Do(context.TODO()).Into(resources) if err != nil { - // ignore 403 or 404 error to be compatible with an v1.0 server. - if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + // Tolerate core/v1 not found response by returning empty resource list; + // this probably should not happen. But we should verify all callers are + // not depending on this toleration before removal. + if groupVersion == "v1" && errors.IsNotFound(err) { return resources, nil } return nil, err @@ -210,13 +414,6 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r return resources, nil } -// ServerResources returns the supported resources for all groups and versions. -// Deprecated: use ServerGroupsAndResources instead. -func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - _, rs, err := d.ServerGroupsAndResources() - return rs, err -} - // ServerGroupsAndResources returns the supported resources for all groups and versions. 
func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { @@ -240,6 +437,12 @@ func (e *ErrGroupDiscoveryFailed) Error() string { return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) } +// Is makes it possible for the callers to use `errors.Is(` helper on errors wrapped with ErrGroupDiscoveryFailed error. +func (e *ErrGroupDiscoveryFailed) Is(target error) bool { + _, ok := target.(*ErrGroupDiscoveryFailed) + return ok +} + // IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover // a complete list of APIs for the client to use. func IsGroupDiscoveryFailedError(err error) bool { @@ -247,15 +450,34 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } -// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. -// Deprecated: use ServerGroupsAndResources instead. -func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - _, rs, err := ServerGroupsAndResources(d) - return rs, err +// GroupDiscoveryFailedErrorGroups returns true if the error is an ErrGroupDiscoveryFailed error, +// along with the map of group versions that failed discovery. +func GroupDiscoveryFailedErrorGroups(err error) (map[schema.GroupVersion]error, bool) { + var groupDiscoveryError *ErrGroupDiscoveryFailed + if err != nil && goerrors.As(err, &groupDiscoveryError) { + return groupDiscoveryError.Groups, true + } + return nil, false } func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - sgs, err := d.ServerGroups() + var sgs *metav1.APIGroupList + var resources []*metav1.APIResourceList + var failedGVs map[schema.GroupVersion]error + var err error + + // If the passed discovery object implements the wider AggregatedDiscoveryInterface, + // then attempt to retrieve aggregated discovery with both groups and the resources. + if ad, ok := d.(AggregatedDiscoveryInterface); ok { + var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList + sgs, resourcesByGV, failedGVs, err = ad.GroupsAndMaybeResources() + for _, resourceList := range resourcesByGV { + resources = append(resources, resourceList) + } + } else { + sgs, err = d.ServerGroups() + } + if sgs == nil { return nil, nil, err } @@ -263,6 +485,16 @@ func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*meta for i := range sgs.Groups { resultGroups = append(resultGroups, &sgs.Groups[i]) } + // resources is non-nil if aggregated discovery succeeded. + if resources != nil { + // Any stale Group/Versions returned by aggregated discovery + // must be surfaced to the caller as failed Group/Versions. 
+ var ferr error + if len(failedGVs) > 0 { + ferr = &ErrGroupDiscoveryFailed{Groups: failedGVs} + } + return resultGroups, resources, ferr + } groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) @@ -286,12 +518,27 @@ func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*meta // ServerPreferredResources uses the provided discovery interface to look up preferred resources func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - serverGroupList, err := d.ServerGroups() + var serverGroupList *metav1.APIGroupList + var failedGroups map[schema.GroupVersion]error + var groupVersionResources map[schema.GroupVersion]*metav1.APIResourceList + var err error + + // If the passed discovery object implements the wider AggregatedDiscoveryInterface, + // then it is attempt to retrieve both the groups and the resources. "failedGroups" + // are Group/Versions returned as stale in AggregatedDiscovery format. + ad, ok := d.(AggregatedDiscoveryInterface) + if ok { + serverGroupList, groupVersionResources, failedGroups, err = ad.GroupsAndMaybeResources() + } else { + serverGroupList, err = d.ServerGroups() + } if err != nil { return nil, err } - - groupVersionResources, failedGroups := fetchGroupVersionResources(d, serverGroupList) + // Non-aggregated discovery must fetch resources from Groups. + if groupVersionResources == nil { + groupVersionResources, failedGroups = fetchGroupVersionResources(d, serverGroupList) + } result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource @@ -420,20 +667,11 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { return &info, nil } -// OpenAPISchema fetches the open api schema using a rest client and parses the proto. +// OpenAPISchema fetches the open api v2 schema using a rest client and parses the proto. func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do(context.TODO()).Raw() + data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", openAPIV2mimePb).Do(context.TODO()).Raw() if err != nil { - if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { - // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO: remove this when kubectl/client-go don't work with 1.9 server - data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do(context.TODO()).Raw() - if err != nil { - return nil, err - } - } else { - return nil, err - } + return nil, err } document := &openapi_v2.Document{} err = proto.Unmarshal(data, document) @@ -443,6 +681,18 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { return document, nil } +func (d *DiscoveryClient) OpenAPIV3() openapi.Client { + return openapi.NewClient(d.restClient) +} + +// WithLegacy returns copy of current discovery client that will only +// receive the legacy discovery format. +func (d *DiscoveryClient) WithLegacy() DiscoveryInterface { + client := *d + client.UseLegacyDiscovery = true + return &client +} + // withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. 
func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var result []*metav1.APIResourceList @@ -466,12 +716,13 @@ func setDiscoveryDefaults(config *restclient.Config) error { if config.Timeout == 0 { config.Timeout = defaultTimeout } - if config.Burst == 0 && config.QPS < 100 { + // if a burst limit is not already configured + if config.Burst == 0 { // discovery is expected to be bursty, increase the default burst // to accommodate looking up resource info for many API groups. // matches burst set by ConfigFlags#ToDiscoveryClient(). // see https://issue.k8s.io/86149 - config.Burst = 100 + config.Burst = defaultBurst } codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) @@ -506,7 +757,7 @@ func NewDiscoveryClientForConfigAndClient(c *restclient.Config, httpClient *http return nil, err } client, err := restclient.UnversionedRESTClientForConfigAndClient(&config, httpClient) - return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err + return &DiscoveryClient{restClient: client, LegacyPrefix: "/api", UseLegacyDiscovery: false}, err } // NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If @@ -522,7 +773,7 @@ func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { // NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { - return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} + return &DiscoveryClient{restClient: c, LegacyPrefix: "/api", UseLegacyDiscovery: false} } // RESTClient returns a RESTClient that is used to communicate diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go index 3bfe514e8..e79f073b0 100644 --- a/vendor/k8s.io/client-go/discovery/helper.go +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -19,12 +19,33 @@ package discovery import ( "fmt" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" apimachineryversion "k8s.io/apimachinery/pkg/version" ) +// IsResourceEnabled queries the server to determine if the resource specified is present on the server. +// This is particularly helpful when writing a controller or an e2e test that requires a particular resource to function. +func IsResourceEnabled(client DiscoveryInterface, resourceToCheck schema.GroupVersionResource) (bool, error) { + // this is a single request. The ServerResourcesForGroupVersion handles the core v1 group as legacy. + resourceList, err := client.ServerResourcesForGroupVersion(resourceToCheck.GroupVersion().String()) + if apierrors.IsNotFound(err) { // if the discovery endpoint isn't present, then the resource isn't present. + return false, nil + } + if err != nil { + return false, err + } + for _, actualResource := range resourceList.APIResources { + if actualResource.Name == resourceToCheck.Resource { + return true, nil + } + } + + return false, nil +} + // MatchesServerVersion queries the server to compares the build version // (git hash) of the client with the server's build version. It returns an error // if it failed to contact the server or if the versions are not an exact match. 
diff --git a/vendor/k8s.io/client-go/dynamic/fake/simple.go b/vendor/k8s.io/client-go/dynamic/fake/simple.go index dee16b245..5d0a6f69f 100644 --- a/vendor/k8s.io/client-go/dynamic/fake/simple.go +++ b/vendor/k8s.io/client-go/dynamic/fake/simple.go @@ -387,7 +387,9 @@ func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOption } list := &unstructured.UnstructuredList{} + list.SetRemainingItemCount(entireList.GetRemainingItemCount()) list.SetResourceVersion(entireList.GetResourceVersion()) + list.SetContinue(entireList.GetContinue()) list.GetObjectKind().SetGroupVersionKind(listGVK) for i := range entireList.Items { item := &entireList.Items[i] @@ -454,6 +456,50 @@ func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types return ret, err } +// TODO: opts are currently ignored. +func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { + outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return nil, err + } + var uncastRet runtime.Object + switch { + case len(c.namespace) == 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootPatchAction(c.resource, name, types.ApplyPatchType, outBytes), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) == 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewRootPatchSubresourceAction(c.resource, name, types.ApplyPatchType, outBytes, subresources...), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) > 0 && len(subresources) == 0: + uncastRet, err = c.client.Fake. + Invokes(testing.NewPatchAction(c.resource, c.namespace, name, types.ApplyPatchType, outBytes), &metav1.Status{Status: "dynamic patch fail"}) + + case len(c.namespace) > 0 && len(subresources) > 0: + uncastRet, err = c.client.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(c.resource, c.namespace, name, types.ApplyPatchType, outBytes, subresources...), &metav1.Status{Status: "dynamic patch fail"}) + + } + + if err != nil { + return nil, err + } + if uncastRet == nil { + return nil, err + } + + ret := &unstructured.Unstructured{} + if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { + return nil, err + } + return ret, nil +} + +func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error) { + return c.Apply(ctx, name, obj, options, "status") +} + func convertObjectsToUnstructured(s *runtime.Scheme, objs []runtime.Object) ([]runtime.Object, error) { ul := make([]runtime.Object, 0, len(objs)) diff --git a/vendor/k8s.io/client-go/dynamic/interface.go b/vendor/k8s.io/client-go/dynamic/interface.go index b08067c34..a310b63e5 100644 --- a/vendor/k8s.io/client-go/dynamic/interface.go +++ b/vendor/k8s.io/client-go/dynamic/interface.go @@ -40,6 +40,8 @@ type ResourceInterface interface { List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) + Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) + ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error) } type NamespaceableResourceInterface interface { diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 87594bf2e..326da7cbd 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,13 +30,16 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" ) -type dynamicClient struct { - client *rest.RESTClient +type DynamicClient struct { + client rest.Interface } -var _ Interface = &dynamicClient{} +var _ Interface = &DynamicClient{} // ConfigFor returns a copy of the provided config with the // appropriate dynamic client defaults set. @@ -50,9 +54,14 @@ func ConfigFor(inConfig *rest.Config) *rest.Config { return config } -// NewForConfigOrDie creates a new Interface for the given config and +// New creates a new DynamicClient for the given RESTClient. +func New(c rest.Interface) *DynamicClient { + return &DynamicClient{client: c} +} + +// NewForConfigOrDie creates a new DynamicClient for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) Interface { +func NewForConfigOrDie(c *rest.Config) *DynamicClient { ret, err := NewForConfig(c) if err != nil { panic(err) @@ -63,7 +72,7 @@ func NewForConfigOrDie(c *rest.Config) Interface { // NewForConfig creates a new dynamic client or returns an error. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). 
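Reviewer note (not part of the vendored files): the dynamic client now exposes server-side apply through the Apply/ApplyStatus methods added to ResourceInterface above. A minimal, hedged usage sketch; the GVR, namespace, object content and field manager are illustrative placeholders:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// applyConfigMap server-side-applies an unstructured object through the new
// Apply method. The object must not carry managedFields (the client rejects that).
func applyConfigMap(ctx context.Context, cfg *rest.Config) error {
	client, err := dynamic.NewForConfig(cfg) // now returns *dynamic.DynamicClient
	if err != nil {
		return err
	}
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "example", "namespace": "default"},
		"data":       map[string]interface{}{"hello": "world"},
	}}
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}
	_, err = client.Resource(gvr).Namespace("default").
		Apply(ctx, "example", obj, metav1.ApplyOptions{FieldManager: "kube-mgmt-example", Force: true})
	return err
}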
-func NewForConfig(inConfig *rest.Config) (Interface, error) { +func NewForConfig(inConfig *rest.Config) (*DynamicClient, error) { config := ConfigFor(inConfig) httpClient, err := rest.HTTPClientFor(config) @@ -75,7 +84,7 @@ func NewForConfig(inConfig *rest.Config) (Interface, error) { // NewForConfigAndClient creates a new dynamic client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) { +func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (*DynamicClient, error) { config := ConfigFor(inConfig) // for serializing the options config.GroupVersion = &schema.GroupVersion{} @@ -85,16 +94,16 @@ func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, er if err != nil { return nil, err } - return &dynamicClient{client: restClient}, nil + return &DynamicClient{client: restClient}, nil } type dynamicResourceClient struct { - client *dynamicClient + client *DynamicClient namespace string resource schema.GroupVersionResource } -func (c *dynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface { +func (c *DynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface { return &dynamicResourceClient{client: c, resource: resource} } @@ -120,6 +129,9 @@ func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Un return nil, fmt.Errorf("name is required") } } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } result := c.client.client. Post(). @@ -152,6 +164,9 @@ func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Un if len(name) == 0 { return nil, fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return nil, err @@ -188,7 +203,9 @@ func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructu if len(name) == 0 { return nil, fmt.Errorf("name is required") } - + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return nil, err @@ -220,6 +237,9 @@ func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts me if len(name) == 0 { return fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return err + } deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) if err != nil { return err @@ -235,6 +255,10 @@ func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts me } func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return err + } + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) if err != nil { return err @@ -254,6 +278,9 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav if len(name) == 0 { return nil, fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { 
+ return nil, err + } result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) if err := result.Error(); err != nil { return nil, err @@ -270,6 +297,27 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav } func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for %v, falling back to the standard LIST semantics, err = %v", c.resource, watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := c.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("watchlist request for %v", c.resource), c.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %v ended with an error, falling back to the standard LIST semantics, err = %v", c.resource, err) + } + result, err := c.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("list request for %v", c.resource), c.list, opts, result) + } + return result, err +} + +func (c *dynamicResourceClient) list(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return nil, err + } result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) if err := result.Error(); err != nil { return nil, err @@ -293,8 +341,32 @@ func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOption return list, nil } +// watchList establishes a watch stream with the server and returns an unstructured list. +func (c *dynamicResourceClient) watchList(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return nil, err + } + + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + + result := &unstructured.UnstructuredList{} + err := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + WatchList(ctx). + Into(result) + + return result, err +} + func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { opts.Watch = true + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { + return nil, err + } return c.client.client.Get().AbsPath(c.makeURLSegments("")...). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). Watch(ctx) @@ -304,6 +376,9 @@ func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types if len(name) == 0 { return nil, fmt.Errorf("name is required") } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } result := c.client.client. Patch(pt). AbsPath(append(c.makeURLSegments(name), subresources...)...). 
@@ -324,6 +399,65 @@ func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types return uncastObj.(*unstructured.Unstructured), nil } +func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { + return nil, err + } + outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + managedFields := accessor.GetManagedFields() + if len(managedFields) > 0 { + return nil, fmt.Errorf(`cannot apply an object with managed fields already set. + Use the client-go/applyconfigurations "UnstructructuredExtractor" to obtain the unstructured ApplyConfiguration for the given field manager that you can use/modify here to apply`) + } + patchOpts := opts.ToPatchOptions() + + result := c.client.client. + Patch(types.ApplyPatchType). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(outBytes). + SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + retBytes, err := result.Raw() + if err != nil { + return nil, err + } + uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) + if err != nil { + return nil, err + } + return uncastObj.(*unstructured.Unstructured), nil +} +func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) { + return c.Apply(ctx, name, obj, opts, "status") +} + +func validateNamespaceWithOptionalName(namespace string, name ...string) error { + if msgs := rest.IsValidPathSegmentName(namespace); len(msgs) != 0 { + return fmt.Errorf("invalid namespace %q: %v", namespace, msgs) + } + if len(name) > 1 { + panic("Invalid number of names") + } else if len(name) == 1 { + if msgs := rest.IsValidPathSegmentName(name[0]); len(msgs) != 0 { + return fmt.Errorf("invalid resource name %q: %v", name[0], msgs) + } + } + return nil +} + func (c *dynamicResourceClient) makeURLSegments(name string) []string { url := []string{} if len(c.resource.Group) == 0 { diff --git a/vendor/k8s.io/client-go/features/envvar.go b/vendor/k8s.io/client-go/features/envvar.go new file mode 100644 index 000000000..8c3f887dc --- /dev/null +++ b/vendor/k8s.io/client-go/features/envvar.go @@ -0,0 +1,188 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package features + +import ( + "fmt" + "os" + "strconv" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/naming" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog/v2" +) + +// internalPackages are packages that ignored when creating a name for featureGates. These packages are in the common +// call chains, so they'd be unhelpful as names. +var internalPackages = []string{"k8s.io/client-go/features/envvar.go"} + +var _ Gates = &envVarFeatureGates{} + +// newEnvVarFeatureGates creates a feature gate that allows for registration +// of features and checking if the features are enabled. +// +// On the first call to Enabled, the effective state of all known features is loaded from +// environment variables. The environment variable read for a given feature is formed by +// concatenating the prefix "KUBE_FEATURE_" with the feature's name. +// +// For example, if you have a feature named "MyFeature" +// setting an environmental variable "KUBE_FEATURE_MyFeature" +// will allow you to configure the state of that feature. +// +// Please note that environmental variables can only be set to the boolean value. +// Incorrect values will be ignored and logged. +// +// Features can also be set directly via the Set method. +// In that case, these features take precedence over +// features set via environmental variables. +func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates { + known := map[Feature]FeatureSpec{} + for name, spec := range features { + known[name] = spec + } + + fg := &envVarFeatureGates{ + callSiteName: naming.GetNameFromCallsite(internalPackages...), + known: known, + } + fg.enabledViaEnvVar.Store(map[Feature]bool{}) + fg.enabledViaSetMethod = map[Feature]bool{} + + return fg +} + +// envVarFeatureGates implements Gates and allows for feature registration. +type envVarFeatureGates struct { + // callSiteName holds the name of the file + // that created this instance + callSiteName string + + // readEnvVarsOnce guards reading environmental variables + readEnvVarsOnce sync.Once + + // known holds known feature gates + known map[Feature]FeatureSpec + + // enabledViaEnvVar holds a map[Feature]bool + // with values explicitly set via env var + enabledViaEnvVar atomic.Value + + // lockEnabledViaSetMethod protects enabledViaSetMethod + lockEnabledViaSetMethod sync.RWMutex + + // enabledViaSetMethod holds values explicitly set + // via Set method, features stored in this map take + // precedence over features stored in enabledViaEnvVar + enabledViaSetMethod map[Feature]bool + + // readEnvVars holds the boolean value which + // indicates whether readEnvVarsOnce has been called. + readEnvVars atomic.Bool +} + +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. +func (f *envVarFeatureGates) Enabled(key Feature) bool { + if v, ok := f.wasFeatureEnabledViaSetMethod(key); ok { + // ensue that the state of all known features + // is loaded from environment variables + // on the first call to Enabled method. + if !f.hasAlreadyReadEnvVar() { + _ = f.getEnabledMapFromEnvVar() + } + return v + } + if v, ok := f.getEnabledMapFromEnvVar()[key]; ok { + return v + } + if v, ok := f.known[key]; ok { + return v.Default + } + panic(fmt.Errorf("feature %q is not registered in FeatureGates %q", key, f.callSiteName)) +} + +// Set sets the given feature to the given value. +// +// Features set via this method take precedence over +// the features set via environment variables. 
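// Editor's sketch (not part of the upstream file): these gates are normally driven
// by environment variables of the form KUBE_FEATURE_<FeatureName>, read once on
// first use. For example, a deployment could export KUBE_FEATURE_WatchListClient=true
// before the process starts and consumer code can branch on the gate. The feature
// name below is assumed to be one of the gates registered in this package's
// known_features.go (not shown in this hunk):
//
//	if features.FeatureGates().Enabled("WatchListClient") {
//		// streaming list path enabled via env var
//	}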
+func (f *envVarFeatureGates) Set(featureName Feature, featureValue bool) error { + feature, ok := f.known[featureName] + if !ok { + return fmt.Errorf("feature %q is not registered in FeatureGates %q", featureName, f.callSiteName) + } + if feature.LockToDefault && feature.Default != featureValue { + return fmt.Errorf("cannot set feature gate %q to %v, feature is locked to %v", featureName, featureValue, feature.Default) + } + + f.lockEnabledViaSetMethod.Lock() + defer f.lockEnabledViaSetMethod.Unlock() + f.enabledViaSetMethod[featureName] = featureValue + + return nil +} + +// getEnabledMapFromEnvVar will fill the enabled map on the first call. +// This is the only time a known feature can be set to a value +// read from the corresponding environmental variable. +func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool { + f.readEnvVarsOnce.Do(func() { + featureGatesState := map[Feature]bool{} + for feature, featureSpec := range f.known { + featureState, featureStateSet := os.LookupEnv(fmt.Sprintf("KUBE_FEATURE_%s", feature)) + if !featureStateSet { + continue + } + boolVal, boolErr := strconv.ParseBool(featureState) + switch { + case boolErr != nil: + utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, due to %v", feature, featureState, boolErr)) + case featureSpec.LockToDefault: + if boolVal != featureSpec.Default { + utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, feature is locked to %v", feature, featureState, featureSpec.Default)) + break + } + featureGatesState[feature] = featureSpec.Default + default: + featureGatesState[feature] = boolVal + } + } + f.enabledViaEnvVar.Store(featureGatesState) + f.readEnvVars.Store(true) + + for feature, featureSpec := range f.known { + if featureState, ok := featureGatesState[feature]; ok { + klog.V(1).InfoS("Feature gate updated state", "feature", feature, "enabled", featureState) + continue + } + klog.V(1).InfoS("Feature gate default state", "feature", feature, "enabled", featureSpec.Default) + } + }) + return f.enabledViaEnvVar.Load().(map[Feature]bool) +} + +func (f *envVarFeatureGates) wasFeatureEnabledViaSetMethod(key Feature) (bool, bool) { + f.lockEnabledViaSetMethod.RLock() + defer f.lockEnabledViaSetMethod.RUnlock() + + value, found := f.enabledViaSetMethod[key] + return value, found +} + +func (f *envVarFeatureGates) hasAlreadyReadEnvVar() bool { + return f.readEnvVars.Load() +} diff --git a/vendor/k8s.io/client-go/features/features.go b/vendor/k8s.io/client-go/features/features.go new file mode 100644 index 000000000..afb67f509 --- /dev/null +++ b/vendor/k8s.io/client-go/features/features.go @@ -0,0 +1,143 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package features + +import ( + "errors" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sync/atomic" +) + +// NOTE: types Feature, FeatureSpec, prerelease (and its values) +// were duplicated from the component-base repository +// +// for more information please refer to https://docs.google.com/document/d/1g9BGCRw-7ucUxO6OtCWbb3lfzUGA_uU9178wLdXAIfs + +const ( + // Values for PreRelease. + Alpha = prerelease("ALPHA") + Beta = prerelease("BETA") + GA = prerelease("") + + // Deprecated + Deprecated = prerelease("DEPRECATED") +) + +type prerelease string + +type Feature string + +type FeatureSpec struct { + // Default is the default enablement state for the feature + Default bool + // LockToDefault indicates that the feature is locked to its default and cannot be changed + LockToDefault bool + // PreRelease indicates the maturity level of the feature + PreRelease prerelease +} + +// Gates indicates whether a given feature is enabled or not. +type Gates interface { + // Enabled returns true if the key is enabled. + Enabled(key Feature) bool +} + +// Registry represents an external feature gates registry. +type Registry interface { + // Add adds existing feature gates to the provided registry. + // + // As of today, this method is used by AddFeaturesToExistingFeatureGates and + // ReplaceFeatureGates to take control of the features exposed by this library. + Add(map[Feature]FeatureSpec) error +} + +// FeatureGates returns the feature gates exposed by this library. +// +// By default, only the default features gates will be returned. +// The default implementation allows controlling the features +// via environmental variables. +// For example, if you have a feature named "MyFeature" +// setting an environmental variable "KUBE_FEATURE_MyFeature" +// will allow you to configure the state of that feature. +// +// Please note that the actual set of the feature gates +// might be overwritten by calling ReplaceFeatureGates method. +func FeatureGates() Gates { + return featureGates.Load().(*featureGatesWrapper).Gates +} + +// AddFeaturesToExistingFeatureGates adds the default feature gates to the provided registry. +// Usually this function is combined with ReplaceFeatureGates to take control of the +// features exposed by this library. +func AddFeaturesToExistingFeatureGates(registry Registry) error { + return registry.Add(defaultKubernetesFeatureGates) +} + +// ReplaceFeatureGates overwrites the default implementation of the feature gates +// used by this library. +// +// Useful for binaries that would like to have full control of the features +// exposed by this library, such as allowing consumers of a binary +// to interact with the features via a command line flag. +// +// For example: +// +// // first, register client-go's features to your registry. +// clientgofeaturegate.AddFeaturesToExistingFeatureGates(utilfeature.DefaultMutableFeatureGate) +// // then replace client-go's feature gates implementation with your implementation +// clientgofeaturegate.ReplaceFeatureGates(utilfeature.DefaultMutableFeatureGate) +func ReplaceFeatureGates(newFeatureGates Gates) { + if replaceFeatureGatesWithWarningIndicator(newFeatureGates) { + utilruntime.HandleError(errors.New("the default feature gates implementation has already been used and now it's being overwritten. This might lead to unexpected behaviour. 
Check your initialization order")) + } +} + +func replaceFeatureGatesWithWarningIndicator(newFeatureGates Gates) bool { + shouldProduceWarning := false + + if defaultFeatureGates, ok := FeatureGates().(*envVarFeatureGates); ok { + if defaultFeatureGates.hasAlreadyReadEnvVar() { + shouldProduceWarning = true + } + } + wrappedFeatureGates := &featureGatesWrapper{newFeatureGates} + featureGates.Store(wrappedFeatureGates) + + return shouldProduceWarning +} + +func init() { + envVarGates := newEnvVarFeatureGates(defaultKubernetesFeatureGates) + + wrappedFeatureGates := &featureGatesWrapper{envVarGates} + featureGates.Store(wrappedFeatureGates) +} + +// featureGatesWrapper a thin wrapper to satisfy featureGates variable (atomic.Value). +// That is, all calls to Store for a given Value must use values of the same concrete type. +type featureGatesWrapper struct { + Gates +} + +var ( + // featureGates is a shared global FeatureGates. + // + // Top-level commands/options setup that needs to modify this feature gates + // should use AddFeaturesToExistingFeatureGates followed by ReplaceFeatureGates. + featureGates = &atomic.Value{} +) diff --git a/vendor/k8s.io/client-go/features/known_features.go b/vendor/k8s.io/client-go/features/known_features.go new file mode 100644 index 000000000..0c972a46f --- /dev/null +++ b/vendor/k8s.io/client-go/features/known_features.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature featuregate.Feature = "MyFeature" + // + // Feature gates should be listed in alphabetical, case-sensitive + // (upper before any lower case character) order. This reduces the risk + // of code conflicts because changes are more likely to be scattered + // across the file. + + // owner: @p0lyn0mial + // beta: v1.30 + // + // Allow the client to get a stream of individual items instead of chunking from the server. + // + // NOTE: + // The feature is disabled in Beta by default because + // it will only be turned on for selected control plane component(s). + WatchListClient Feature = "WatchListClient" + + // owner: @nilekhc + // alpha: v1.30 + InformerResourceVersion Feature = "InformerResourceVersion" +) + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// +// To add a new feature, define a key for it above and add it here. +// After registering with the binary, the features are, by default, controllable using environment variables. +// For more details, please see envVarFeatureGates implementation. 
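As a sketch of the `Registry`/`Gates` contract defined in `features.go` above (the `memoryGates` type below is hypothetical and not part of client-go), a consumer can register client-go's known gates into its own implementation and then swap it in with `ReplaceFeatureGates`:

```go
package example

import clientgofeatures "k8s.io/client-go/features"

// memoryGates is a toy feature-gate store that satisfies both
// features.Registry (via Add) and features.Gates (via Enabled).
type memoryGates struct {
	enabled map[clientgofeatures.Feature]bool
}

// Add records the default enablement of every gate exposed by client-go.
func (m *memoryGates) Add(specs map[clientgofeatures.Feature]clientgofeatures.FeatureSpec) error {
	for name, spec := range specs {
		m.enabled[name] = spec.Default
	}
	return nil
}

// Enabled reports whether the given gate is on.
func (m *memoryGates) Enabled(key clientgofeatures.Feature) bool {
	return m.enabled[key]
}

func takeOverFeatureGates() {
	gates := &memoryGates{enabled: map[clientgofeatures.Feature]bool{}}
	// First register client-go's known gates, then replace the default
	// env-var-driven implementation with ours.
	_ = clientgofeatures.AddFeaturesToExistingFeatureGates(gates)
	clientgofeatures.ReplaceFeatureGates(gates)
}
```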
+var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{ + WatchListClient: {Default: false, PreRelease: Beta}, + InformerResourceVersion: {Default: false, PreRelease: Alpha}, +} diff --git a/vendor/k8s.io/client-go/gentype/type.go b/vendor/k8s.io/client-go/gentype/type.go new file mode 100644 index 000000000..b5be84318 --- /dev/null +++ b/vendor/k8s.io/client-go/gentype/type.go @@ -0,0 +1,360 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gentype + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" +) + +// objectWithMeta matches objects implementing both runtime.Object and metav1.Object. +type objectWithMeta interface { + runtime.Object + metav1.Object +} + +// namedObject matches comparable objects implementing GetName(); it is intended for use with apply declarative configurations. +type namedObject interface { + comparable + GetName() *string +} + +// Client represents a client, optionally namespaced, with no support for lists or apply declarative configurations. +type Client[T objectWithMeta] struct { + resource string + client rest.Interface + namespace string // "" for non-namespaced clients + newObject func() T + parameterCodec runtime.ParameterCodec +} + +// ClientWithList represents a client with support for lists. +type ClientWithList[T objectWithMeta, L runtime.Object] struct { + *Client[T] + alsoLister[T, L] +} + +// ClientWithApply represents a client with support for apply declarative configurations. +type ClientWithApply[T objectWithMeta, C namedObject] struct { + *Client[T] + alsoApplier[T, C] +} + +// ClientWithListAndApply represents a client with support for lists and apply declarative configurations. +type ClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct { + *Client[T] + alsoLister[T, L] + alsoApplier[T, C] +} + +// Helper types for composition +type alsoLister[T objectWithMeta, L runtime.Object] struct { + client *Client[T] + newList func() L +} + +type alsoApplier[T objectWithMeta, C namedObject] struct { + client *Client[T] +} + +// NewClient constructs a client, namespaced or not, with no support for lists or apply. +// Non-namespaced clients are constructed by passing an empty namespace (""). +func NewClient[T objectWithMeta]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, +) *Client[T] { + return &Client[T]{ + resource: resource, + client: client, + parameterCodec: parameterCodec, + namespace: namespace, + newObject: emptyObjectCreator, + } +} + +// NewClientWithList constructs a namespaced client with support for lists. 
+func NewClientWithList[T objectWithMeta, L runtime.Object]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, +) *ClientWithList[T, L] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithList[T, L]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + } +} + +// NewClientWithApply constructs a namespaced client with support for apply declarative configurations. +func NewClientWithApply[T objectWithMeta, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, +) *ClientWithApply[T, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithApply[T, C]{ + typeClient, + alsoApplier[T, C]{typeClient}, + } +} + +// NewClientWithListAndApply constructs a client with support for lists and applying declarative configurations. +func NewClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, +) *ClientWithListAndApply[T, L, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator) + return &ClientWithListAndApply[T, L, C]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + alsoApplier[T, C]{typeClient}, + } +} + +// GetClient returns the REST interface. +func (c *Client[T]) GetClient() rest.Interface { + return c.client +} + +// GetNamespace returns the client's namespace, if any. +func (c *Client[T]) GetNamespace() string { + return c.namespace +} + +// Get takes name of the resource, and returns the corresponding object, and an error if there is any. +func (c *Client[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) { + result := c.newObject() + err := c.client.Get(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + VersionedParams(&options, c.parameterCodec). + Do(ctx). + Into(result) + return result, err +} + +// List takes label and field selectors, and returns the list of resources that match those selectors. 
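To illustrate how these constructors are meant to be embedded (a sketch only; `configMaps` is a made-up wrapper, though the real generated clients in this dependency bump follow the same shape), a namespaced typed client can be expressed in terms of `gentype.ClientWithList`:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/gentype"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

// configMaps is a hypothetical typed wrapper around the generic client for the
// "configmaps" resource; Get/List/Watch/Create/... come from the embedded type.
type configMaps struct {
	*gentype.ClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList]
}

func newConfigMaps(c rest.Interface, namespace string) *configMaps {
	return &configMaps{
		gentype.NewClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList](
			"configmaps",
			c,
			scheme.ParameterCodec,
			namespace,
			func() *corev1.ConfigMap { return &corev1.ConfigMap{} },
			func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} }),
	}
}
```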
+func (l *alsoLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (L, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for $.type|resource$, falling back to the standard LIST semantics, err = %v", watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := l.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, "watchlist request for "+l.client.resource, l.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %s ended with an error, falling back to the standard LIST semantics, err = %v", l.client.resource, err) + } + result, err := l.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, "list request for "+l.client.resource, l.list, opts, result) + } + return result, err +} + +func (l *alsoLister[T, L]) list(ctx context.Context, opts metav1.ListOptions) (L, error) { + list := l.newList() + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + err := l.client.client.Get(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + Do(ctx). + Into(list) + return list, err +} + +// watchList establishes a watch stream with the server and returns the list of resources. +func (l *alsoLister[T, L]) watchList(ctx context.Context, opts metav1.ListOptions) (result L, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = l.newList() + err = l.client.client.Get(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + WatchList(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resources. +func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions) (T, error) { + result := c.newObject() + err := c.client.Post(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). 
+ VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// UpdateStatus updates the status subresource of a resource. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). + SubResource("status"). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Delete takes name of the resource and deletes it. Returns an error if one occurs. +func (c *Client[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return l.client.client.Delete(). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&listOpts, l.client.parameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resource. +func (c *Client[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) { + result := c.newObject() + err := c.client.Patch(pt). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, c.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resource. +func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + result := a.client.newObject() + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(obj) + if err != nil { + return *new(T), err + } + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + err = a.client.client.Patch(types.ApplyPatchType). + NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + VersionedParams(&patchOpts, a.client.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it to the status subresource and returns the applied resource. +func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(obj) + if err != nil { + return *new(T), err + } + + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + + result := a.client.newObject() + err = a.client.client.Patch(types.ApplyPatchType). 
+ NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + SubResource("status"). + VersionedParams(&patchOpts, a.client.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index e46c0537f..9cddb0bbe 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -24,12 +24,14 @@ import ( discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authenticationv1alpha1 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" @@ -40,8 +42,10 @@ import ( batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" certificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" + certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + coordinationv1alpha1 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" @@ -49,10 +53,12 @@ import ( eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" + flowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" @@ -62,12 +68,14 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" 
schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" ) @@ -75,12 +83,14 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface + AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface + AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface AuthorizationV1() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface @@ -92,6 +102,8 @@ type Interface interface { BatchV1beta1() batchv1beta1.BatchV1beta1Interface CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface + CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -100,10 +112,12 @@ type Interface interface { EventsV1() eventsv1.EventsV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface + FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface + FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface NetworkingV1() networkingv1.NetworkingV1Interface + NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface NodeV1() nodev1.NodeV1Interface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface @@ -113,63 +127,72 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface + StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. 
type Clientset struct { *discovery.DiscoveryClient - admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client - admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client - internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client - appsV1 *appsv1.AppsV1Client - appsV1beta1 *appsv1beta1.AppsV1beta1Client - appsV1beta2 *appsv1beta2.AppsV1beta2Client - authenticationV1 *authenticationv1.AuthenticationV1Client - authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client - authorizationV1 *authorizationv1.AuthorizationV1Client - authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client - autoscalingV1 *autoscalingv1.AutoscalingV1Client - autoscalingV2 *autoscalingv2.AutoscalingV2Client - autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client - autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client - batchV1 *batchv1.BatchV1Client - batchV1beta1 *batchv1beta1.BatchV1beta1Client - certificatesV1 *certificatesv1.CertificatesV1Client - certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client - coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client - coordinationV1 *coordinationv1.CoordinationV1Client - coreV1 *corev1.CoreV1Client - discoveryV1 *discoveryv1.DiscoveryV1Client - discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client - eventsV1 *eventsv1.EventsV1Client - eventsV1beta1 *eventsv1beta1.EventsV1beta1Client - extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client - flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client - flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client - networkingV1 *networkingv1.NetworkingV1Client - networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client - nodeV1 *nodev1.NodeV1Client - nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client - nodeV1beta1 *nodev1beta1.NodeV1beta1Client - policyV1 *policyv1.PolicyV1Client - policyV1beta1 *policyv1beta1.PolicyV1beta1Client - rbacV1 *rbacv1.RbacV1Client - rbacV1beta1 *rbacv1beta1.RbacV1beta1Client - rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client - schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client - schedulingV1 *schedulingv1.SchedulingV1Client - storageV1beta1 *storagev1beta1.StorageV1beta1Client - storageV1 *storagev1.StorageV1Client - storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client + admissionregistrationV1 *admissionregistrationv1.AdmissionregistrationV1Client + admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client + appsV1 *appsv1.AppsV1Client + appsV1beta1 *appsv1beta1.AppsV1beta1Client + appsV1beta2 *appsv1beta2.AppsV1beta2Client + authenticationV1 *authenticationv1.AuthenticationV1Client + authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client + authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client + authorizationV1 *authorizationv1.AuthorizationV1Client + authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client + autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2 *autoscalingv2.AutoscalingV2Client + autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client + autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client + batchV1 *batchv1.BatchV1Client 
+ batchV1beta1 *batchv1beta1.BatchV1beta1Client + certificatesV1 *certificatesv1.CertificatesV1Client + certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client + coordinationV1alpha1 *coordinationv1alpha1.CoordinationV1alpha1Client + coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client + coordinationV1 *coordinationv1.CoordinationV1Client + coreV1 *corev1.CoreV1Client + discoveryV1 *discoveryv1.DiscoveryV1Client + discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client + eventsV1 *eventsv1.EventsV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client + extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + flowcontrolV1 *flowcontrolv1.FlowcontrolV1Client + flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client + flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client + flowcontrolV1beta3 *flowcontrolv1beta3.FlowcontrolV1beta3Client + networkingV1 *networkingv1.NetworkingV1Client + networkingV1alpha1 *networkingv1alpha1.NetworkingV1alpha1Client + networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1 *nodev1.NodeV1Client + nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client + nodeV1beta1 *nodev1beta1.NodeV1beta1Client + policyV1 *policyv1.PolicyV1Client + policyV1beta1 *policyv1beta1.PolicyV1beta1Client + rbacV1 *rbacv1.RbacV1Client + rbacV1beta1 *rbacv1beta1.RbacV1beta1Client + rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + resourceV1alpha3 *resourcev1alpha3.ResourceV1alpha3Client + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client + schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client + schedulingV1 *schedulingv1.SchedulingV1Client + storageV1beta1 *storagev1beta1.StorageV1beta1Client + storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client + storagemigrationV1alpha1 *storagemigrationv1alpha1.StoragemigrationV1alpha1Client } // AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client @@ -177,6 +200,11 @@ func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.Admissionr return c.admissionregistrationV1 } +// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client +func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { + return c.admissionregistrationV1alpha1 +} + // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { return c.admissionregistrationV1beta1 @@ -207,6 +235,11 @@ func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interfac return c.authenticationV1 } +// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client +func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return c.authenticationV1alpha1 +} + // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return c.authenticationV1beta1 @@ -262,6 +295,16 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } +// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client +func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + return c.certificatesV1alpha1 +} + +// CoordinationV1alpha1 
retrieves the CoordinationV1alpha1Client +func (c *Clientset) CoordinationV1alpha1() coordinationv1alpha1.CoordinationV1alpha1Interface { + return c.coordinationV1alpha1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -302,9 +345,9 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return c.extensionsV1beta1 } -// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client -func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { - return c.flowcontrolV1alpha1 +// FlowcontrolV1 retrieves the FlowcontrolV1Client +func (c *Clientset) FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface { + return c.flowcontrolV1 } // FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client @@ -317,11 +360,21 @@ func (c *Clientset) FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2In return c.flowcontrolV1beta2 } +// FlowcontrolV1beta3 retrieves the FlowcontrolV1beta3Client +func (c *Clientset) FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface { + return c.flowcontrolV1beta3 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } +// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client +func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { + return c.networkingV1alpha1 +} + // NetworkingV1beta1 retrieves the NetworkingV1beta1Client func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { return c.networkingV1beta1 @@ -367,6 +420,11 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { + return c.resourceV1alpha3 +} + // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { return c.schedulingV1alpha1 @@ -397,6 +455,11 @@ func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return c.storageV1alpha1 } +// StoragemigrationV1alpha1 retrieves the StoragemigrationV1alpha1Client +func (c *Clientset) StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface { + return c.storagemigrationV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -445,6 +508,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -469,6 +536,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.authenticationV1alpha1, err = authenticationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ 
-513,6 +584,14 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.certificatesV1alpha1, err = certificatesv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.coordinationV1alpha1, err = coordinationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -545,7 +624,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.flowcontrolV1, err = flowcontrolv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -557,10 +636,18 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.flowcontrolV1beta3, err = flowcontrolv1beta3.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.networkingV1, err = networkingv1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } + cs.networkingV1alpha1, err = networkingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -597,6 +684,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.resourceV1alpha3, err = resourcev1alpha3.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -621,6 +712,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.storagemigrationV1alpha1, err = storagemigrationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -643,12 +738,14 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1 = admissionregistrationv1.New(c) + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.internalV1alpha1 = internalv1alpha1.New(c) cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) cs.authenticationV1 = authenticationv1.New(c) + cs.authenticationV1alpha1 = authenticationv1alpha1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) cs.authorizationV1beta1 = authorizationv1beta1.New(c) @@ -660,6 +757,8 @@ func New(c rest.Interface) *Clientset { cs.batchV1beta1 = batchv1beta1.New(c) cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) + cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) + cs.coordinationV1alpha1 = coordinationv1alpha1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = 
coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -668,10 +767,12 @@ func New(c rest.Interface) *Clientset { cs.eventsV1 = eventsv1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) - cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) + cs.flowcontrolV1 = flowcontrolv1.New(c) cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) + cs.flowcontrolV1beta3 = flowcontrolv1beta3.New(c) cs.networkingV1 = networkingv1.New(c) + cs.networkingV1alpha1 = networkingv1alpha1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) cs.nodeV1 = nodev1.New(c) cs.nodeV1alpha1 = nodev1alpha1.New(c) @@ -681,12 +782,14 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) + cs.resourceV1alpha3 = resourcev1alpha3.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) cs.storageV1alpha1 = storagev1alpha1.New(c) + cs.storagemigrationV1alpha1 = storagemigrationv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index b272334ad..e052f81b8 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -1,5 +1,5 @@ /* -Copyright The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package kubernetes +// Package kubernetes holds packages which implement a clientset for Kubernetes +// APIs. 
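A usage sketch (assuming a valid `*rest.Config`; the function name is illustrative): callers of the clientset wired above now reach the promoted flowcontrol group through `FlowcontrolV1()` instead of the removed `FlowcontrolV1alpha1()` accessor.

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func listFlowSchemas(ctx context.Context, cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// FlowcontrolV1 replaces the FlowcontrolV1alpha1 accessor that existed in
	// the previously vendored client-go.
	fss, err := cs.FlowcontrolV1().FlowSchemas().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println("flow schemas:", len(fss.Items))
	return nil
}
```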
+package kubernetes // import "k8s.io/client-go/kubernetes" diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 24046233b..5262b0f04 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -20,12 +20,14 @@ package scheme import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" authorizationv1beta1 "k8s.io/api/authorization/v1beta1" @@ -36,8 +38,10 @@ import ( batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha1 "k8s.io/api/coordination/v1alpha1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -45,10 +49,12 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -58,12 +64,14 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -76,12 +84,14 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ admissionregistrationv1.AddToScheme, + admissionregistrationv1alpha1.AddToScheme, admissionregistrationv1beta1.AddToScheme, internalv1alpha1.AddToScheme, appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, authenticationv1.AddToScheme, + authenticationv1alpha1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, authorizationv1beta1.AddToScheme, @@ -93,6 +103,8 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv1beta1.AddToScheme, certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, + certificatesv1alpha1.AddToScheme, + 
coordinationv1alpha1.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -101,10 +113,12 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, - flowcontrolv1alpha1.AddToScheme, + flowcontrolv1.AddToScheme, flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, + flowcontrolv1beta3.AddToScheme, networkingv1.AddToScheme, + networkingv1alpha1.AddToScheme, networkingv1beta1.AddToScheme, nodev1.AddToScheme, nodev1alpha1.AddToScheme, @@ -114,12 +128,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, + resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, storagev1alpha1.AddToScheme, + storagemigrationv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go index 10848bed1..a81b2b682 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -29,6 +29,8 @@ import ( type AdmissionregistrationV1Interface interface { RESTClient() rest.Interface MutatingWebhookConfigurationsGetter + ValidatingAdmissionPoliciesGetter + ValidatingAdmissionPolicyBindingsGetter ValidatingWebhookConfigurationsGetter } @@ -41,6 +43,14 @@ func (c *AdmissionregistrationV1Client) MutatingWebhookConfigurations() Mutating return newMutatingWebhookConfigurations(c) } +func (c *AdmissionregistrationV1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { + return newValidatingAdmissionPolicies(c) +} + +func (c *AdmissionregistrationV1Client) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface { + return newValidatingAdmissionPolicyBindings(c) +} + func (c *AdmissionregistrationV1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { return newValidatingWebhookConfigurations(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go index a5b062e37..d81e1c87f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/generated_expansion.go @@ -20,4 +20,8 @@ package v1 type MutatingWebhookConfigurationExpansion interface{} +type ValidatingAdmissionPolicyExpansion interface{} + +type ValidatingAdmissionPolicyBindingExpansion interface{} + type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go index edbc826d1..e863766c6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1 
import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface { // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.MutatingWebhookConfiguration, *v1.MutatingWebhookConfigurationList, *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.MutatingWebhookConfiguration { return &v1.MutatingWebhookConfiguration{} }, + func() *v1.MutatingWebhookConfigurationList { return &v1.MutatingWebhookConfigurationList{} }), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
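Although the hand-written methods below are deleted in favor of the embedded `gentype` client, the public `MutatingWebhookConfigurationInterface` is unchanged; a typical caller (sketch only, with an assumed clientset and an illustrative field manager) still performs server-side apply the same way:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionregistrationv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureWebhookConfig applies a minimal configuration; the typed Apply call is
// now served by the generic gentype implementation under the hood.
func ensureWebhookConfig(ctx context.Context, cs kubernetes.Interface) error {
	cfg := admissionregistrationv1ac.MutatingWebhookConfiguration("example-webhook")
	_, err := cs.AdmissionregistrationV1().
		MutatingWebhookConfigurations().
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "kube-mgmt", Force: true})
	return err
}
```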
-func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go new file mode 100644 index 000000000..1b20e6960 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPoliciesGetter interface { + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface +} + +// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. +type ValidatingAdmissionPolicyInterface interface { + Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + ValidatingAdmissionPolicyExpansion +} + +// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type validatingAdmissionPolicies struct { + *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration] +} + +// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies +func newValidatingAdmissionPolicies(c *AdmissionregistrationV1Client) *validatingAdmissionPolicies { + return &validatingAdmissionPolicies{ + gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicy, *v1.ValidatingAdmissionPolicyList, *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingAdmissionPolicy { return &v1.ValidatingAdmissionPolicy{} }, + func() *v1.ValidatingAdmissionPolicyList { return &v1.ValidatingAdmissionPolicyList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..44694b232 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPolicyBindingsGetter interface { + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface +} + +// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. 
+type ValidatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) + ValidatingAdmissionPolicyBindingExpansion +} + +// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type validatingAdmissionPolicyBindings struct { + *gentype.ClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration] +} + +// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings +func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1Client) *validatingAdmissionPolicyBindings { + return &validatingAdmissionPolicyBindings{ + gentype.NewClientWithListAndApply[*v1.ValidatingAdmissionPolicyBinding, *v1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingAdmissionPolicyBinding { return &v1.ValidatingAdmissionPolicyBinding{} }, + func() *v1.ValidatingAdmissionPolicyBindingList { return &v1.ValidatingAdmissionPolicyBindingList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go index 065e3c834..11b4ac059 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. 
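Note on the rewrite above: the regenerated typed clients drop their hand-written rest.Interface plumbing and embed the generic gentype.ClientWithListAndApply, while the exported interface methods (Get/List/Watch/Create/Update/Delete/Patch/Apply) keep the same signatures. A minimal usage sketch, assuming in-cluster credentials and the standard kubernetes clientset; nothing below is taken from this diff, it only illustrates that call sites keep the same shape after the migration.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumption: the process runs inside a cluster with RBAC to read webhook configurations.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same call shape as before the gentype rewrite; the request is now served by the
	// embedded generic client rather than the hand-rolled REST methods removed above.
	list, err := cs.AdmissionregistrationV1().MutatingWebhookConfigurations().
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("mutating webhook configurations:", len(list.Items))
}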
@@ -55,143 +52,18 @@ type ValidatingWebhookConfigurationInterface interface { // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ValidatingWebhookConfiguration, *v1.ValidatingWebhookConfigurationList, *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ValidatingWebhookConfiguration { return &v1.ValidatingWebhookConfiguration{} }, + func() *v1.ValidatingWebhookConfigurationList { return &v1.ValidatingWebhookConfigurationList{} }), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go new file mode 100644 index 000000000..f6102d25a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1alpha1Interface interface { + RESTClient() rest.Interface + ValidatingAdmissionPoliciesGetter + ValidatingAdmissionPolicyBindingsGetter +} + +// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1alpha1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { + return newValidatingAdmissionPolicies(c) +} + +func (c *AdmissionregistrationV1alpha1Client) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface { + return newValidatingAdmissionPolicyBindings(c) +} + +// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new AdmissionregistrationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmissionregistrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { + return &AdmissionregistrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go similarity index 57% rename from vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index 5070cb91b..94562da59 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -1,6 +1,3 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - /* Copyright The Kubernetes Authors. @@ -17,17 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by defaulter-gen. DO NOT EDIT. +// Code generated by client-gen. DO NOT EDIT. package v1alpha1 -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) +type ValidatingAdmissionPolicyExpansion interface{} -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} +type ValidatingAdmissionPolicyBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go new file mode 100644 index 000000000..c2b7c825c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPoliciesGetter interface { + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface +} + +// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. +type ValidatingAdmissionPolicyInterface interface { + Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + ValidatingAdmissionPolicyExpansion +} + +// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type validatingAdmissionPolicies struct { + *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration] +} + +// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies +func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies { + return &validatingAdmissionPolicies{ + gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicy, *v1alpha1.ValidatingAdmissionPolicyList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ValidatingAdmissionPolicy { return &v1alpha1.ValidatingAdmissionPolicy{} }, + func() *v1alpha1.ValidatingAdmissionPolicyList { return &v1alpha1.ValidatingAdmissionPolicyList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..d8d0796ea --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPolicyBindingsGetter interface { + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface +} + +// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. 
+type ValidatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) + ValidatingAdmissionPolicyBindingExpansion +} + +// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type validatingAdmissionPolicyBindings struct { + *gentype.ClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration] +} + +// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings +func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings { + return &validatingAdmissionPolicyBindings{ + gentype.NewClientWithListAndApply[*v1alpha1.ValidatingAdmissionPolicyBinding, *v1alpha1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ValidatingAdmissionPolicyBinding { return &v1alpha1.ValidatingAdmissionPolicyBinding{} }, + func() *v1alpha1.ValidatingAdmissionPolicyBindingList { + return &v1alpha1.ValidatingAdmissionPolicyBindingList{} + }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 8fda84b1d..5a0a17d9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -29,6 +29,8 @@ import ( type AdmissionregistrationV1beta1Interface interface { RESTClient() rest.Interface MutatingWebhookConfigurationsGetter + ValidatingAdmissionPoliciesGetter + ValidatingAdmissionPolicyBindingsGetter ValidatingWebhookConfigurationsGetter } @@ -41,6 +43,14 @@ func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() Mut return newMutatingWebhookConfigurations(c) } +func (c *AdmissionregistrationV1beta1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { + return newValidatingAdmissionPolicies(c) +} + +func (c 
*AdmissionregistrationV1beta1Client) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface { + return newValidatingAdmissionPolicyBindings(c) +} + func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { return newValidatingWebhookConfigurations(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go index 2aeb9c98a..56ad611f4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -20,4 +20,8 @@ package v1beta1 type MutatingWebhookConfigurationExpansion interface{} +type ValidatingAdmissionPolicyExpansion interface{} + +type ValidatingAdmissionPolicyBindingExpansion interface{} + type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index ca6bb8bd5..7a5bc8b9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -55,143 +52,18 @@ type MutatingWebhookConfigurationInterface interface { // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.MutatingWebhookConfiguration, *v1beta1.MutatingWebhookConfigurationList, *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.MutatingWebhookConfiguration { return &v1beta1.MutatingWebhookConfiguration{} }, + func() *v1beta1.MutatingWebhookConfigurationList { return &v1beta1.MutatingWebhookConfigurationList{} }), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. 
-func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go new file mode 100644 index 000000000..0023d8837 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPoliciesGetter interface { + ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface +} + +// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. +type ValidatingAdmissionPolicyInterface interface { + Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + ValidatingAdmissionPolicyExpansion +} + +// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface +type validatingAdmissionPolicies struct { + *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration] +} + +// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies +func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies { + return &validatingAdmissionPolicies{ + gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicy, *v1beta1.ValidatingAdmissionPolicyList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingAdmissionPolicy { return &v1beta1.ValidatingAdmissionPolicy{} }, + func() *v1beta1.ValidatingAdmissionPolicyList { return &v1beta1.ValidatingAdmissionPolicyList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go new file mode 100644 index 000000000..8168d8cbc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type ValidatingAdmissionPolicyBindingsGetter interface { + ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface +} + +// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. 
+type ValidatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) + ValidatingAdmissionPolicyBindingExpansion +} + +// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface +type validatingAdmissionPolicyBindings struct { + *gentype.ClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration] +} + +// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings +func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings { + return &validatingAdmissionPolicyBindings{ + gentype.NewClientWithListAndApply[*v1beta1.ValidatingAdmissionPolicyBinding, *v1beta1.ValidatingAdmissionPolicyBindingList, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingAdmissionPolicyBinding { return &v1beta1.ValidatingAdmissionPolicyBinding{} }, + func() *v1beta1.ValidatingAdmissionPolicyBindingList { + return &v1beta1.ValidatingAdmissionPolicyBindingList{} + }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 5ba5974d7..5abd96823 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. 
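Beyond the mechanical gentype rewrite, this vendor bump also adds typed clients for ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding at v1alpha1, v1beta1, and v1. A minimal sketch of listing policies through the new v1 client; the kubeconfig loading shown is an assumption for illustration and is not part of the vendored code.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig exists at the default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// ValidatingAdmissionPolicies() is backed by the validatingadmissionpolicy.go files added
	// in this diff (v1 shown here; the v1beta1 and v1alpha1 clients mirror the same methods).
	policies, err := cs.AdmissionregistrationV1().ValidatingAdmissionPolicies().
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range policies.Items {
		fmt.Println(p.Name)
	}
}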
@@ -55,143 +52,20 @@ type ValidatingWebhookConfigurationInterface interface { // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ValidatingWebhookConfiguration, *v1beta1.ValidatingWebhookConfigurationList, *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ValidatingWebhookConfiguration { return &v1beta1.ValidatingWebhookConfiguration{} }, + func() *v1beta1.ValidatingWebhookConfigurationList { + return &v1beta1.ValidatingWebhookConfigurationList{} + }), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go index 18789c7f8..436593f7f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageVersionsGetter has a method to return a StorageVersionInterface. @@ -43,6 +40,7 @@ type StorageVersionsGetter interface { type StorageVersionInterface interface { Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type StorageVersionInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) StorageVersionExpansion } // storageVersions implements StorageVersionInterface type storageVersions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration] } // newStorageVersions returns a StorageVersions func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { return &storageVersions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.StorageVersion, *v1alpha1.StorageVersionList, *apiserverinternalv1alpha1.StorageVersionApplyConfiguration]( + "storageversions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.StorageVersion { return &v1alpha1.StorageVersion{} }, + func() *v1alpha1.StorageVersionList { return &v1alpha1.StorageVersionList{} }), } } - -// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. 
-func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Get(). - Resource("storageversions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. -func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.StorageVersionList{} - err = c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageVersions. -func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Post(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. -func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageversions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageversions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageVersion. -func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(pt). - Resource("storageversions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersion. -func (c *storageVersions) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *storageVersions) ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index f4b198265..252f47ba2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ControllerRevision, *v1.ControllerRevisionList, *appsv1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ControllerRevision { return &v1.ControllerRevision{} }, + func() *v1.ControllerRevisionList { return &v1.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
-func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index 53e539287..28917a796 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.DaemonSet, *v1.DaemonSetList, *appsv1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.DaemonSet { return &v1.DaemonSet{} }, + func() *v1.DaemonSetList { return &v1.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
-func (c *daemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. 
-func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index ccc2049ff..871d51cfe 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -45,6 +44,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Deployment, *v1.DeploymentList, *appsv1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Deployment { return &v1.Deployment{} }, + func() *v1.DeploymentList { return &v1.DeploymentList{} }), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. 
Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). 
- Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -303,8 +122,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 917ed521f..d6dec016b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. 
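[Editor's note, not part of the patch] The replicaset.go hunks below follow the same pattern as deployment.go above: the generated struct drops its private `client rest.Interface` / `ns string` fields in favor of an embedded `gentype.ClientWithListAndApply`, and the hand-written scale-subresource methods reach the REST machinery through that embedded type's `GetClient()` and `GetNamespace()` accessors (both visible in the hunks themselves). A minimal sketch of the pattern, with illustrative type names only:

```go
package sketch

import (
	appsv1 "k8s.io/api/apps/v1"
	applyappsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
	gentype "k8s.io/client-go/gentype"
	rest "k8s.io/client-go/rest"
)

// Before: the generated client carried its own REST client and namespace.
type replicaSetsBefore struct {
	client rest.Interface
	ns     string
}

// After: it embeds the generic client, parameterized by object, list and
// apply-configuration types, exactly as in the constructor hunk below.
type replicaSetsAfter struct {
	*gentype.ClientWithListAndApply[*appsv1.ReplicaSet, *appsv1.ReplicaSetList, *applyappsv1.ReplicaSetApplyConfiguration]
}

// Hand-written expansion methods therefore change
//     c.client.Get().Namespace(c.ns)...
// to
//     c.GetClient().Get().Namespace(c.GetNamespace())...
// while the rest of the request-builder chain stays the same.
```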
@@ -45,6 +44,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ReplicaSet, *v1.ReplicaSetList, *appsv1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ReplicaSet { return &v1.ReplicaSet{} }, + func() *v1.ReplicaSetList { return &v1.ReplicaSetList{} }), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. 
-func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. 
-func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -303,8 +122,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). 
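[Editor's note, not part of the patch] None of these regenerated files change the public typed interfaces (Get/List/Watch/Create/Update/Patch/Apply and the scale helpers keep their signatures), so callers that go through the generated clientset, as kube-mgmt does, need no code changes from this vendor bump. A hedged consumer-side example, with placeholder namespace and config handling:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// In-cluster config is just one way to obtain *rest.Config; kube-mgmt's own
	// kubeconfig handling is out of scope for this sketch.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The typed call sites look the same before and after the gentype refactor;
	// only the generated implementation behind them changed. "opa" is a
	// placeholder namespace.
	deployments, err := clientset.AppsV1().Deployments("opa").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range deployments.Items {
		fmt.Printf("%s/%s ready=%d\n", d.Namespace, d.Name, d.Status.ReadyReplicas)
	}
}
```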
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index d1fbb915d..b25ed0723 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -22,7 +22,6 @@ import ( "context" json "encoding/json" "fmt" - "time" v1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -31,8 +30,8 @@ import ( watch "k8s.io/apimachinery/pkg/watch" appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -45,6 +44,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -53,6 +53,7 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,27 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.StatefulSet, *v1.StatefulSetList, *appsv1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.StatefulSet { return &v1.StatefulSet{} }, + func() *v1.StatefulSetList { return &v1.StatefulSetList{} }), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). 
- VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -278,8 +97,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). 
SubResource("scale"). @@ -303,8 +122,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 0c3f49ba1..185f7cc4e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.ControllerRevision, *v1beta1.ControllerRevisionList, *appsv1beta1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.ControllerRevision { return &v1beta1.ControllerRevision{} }, + func() *v1beta1.ControllerRevisionList { return &v1beta1.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. 
-func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. 
-func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 281758c43..06e4b7bf9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +40,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *appsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 3f1aebcff..1ff69eb99 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -43,6 +40,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) StatefulSetExpansion } // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.StatefulSet, *v1beta1.StatefulSetList, *appsv1beta1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.StatefulSet { return &v1beta1.StatefulSet{} }, + func() *v1beta1.StatefulSetList { return &v1beta1.StatefulSetList{} }), } } - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e1643277a..6caee6a72 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. 
@@ -55,154 +52,18 @@ type ControllerRevisionInterface interface { // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.ControllerRevision, *v1beta2.ControllerRevisionList, *appsv1beta2.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.ControllerRevision { return &v1beta2.ControllerRevision{} }, + func() *v1beta2.ControllerRevisionList { return &v1beta2.ControllerRevisionList{} }), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
-func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 1391df87d..766dc6d43 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.DaemonSet, *v1beta2.DaemonSetList, *appsv1beta2.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.DaemonSet { return &v1beta2.DaemonSet{} }, + func() *v1beta2.DaemonSetList { return &v1beta2.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
-func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. 
-func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 5bda0d92c..6592ee8cd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +40,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.Deployment, *v1beta2.DeploymentList, *appsv1beta2.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.Deployment { return &v1beta2.Deployment{} }, + func() *v1beta2.DeploymentList { return &v1beta2.DeploymentList{} }), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 988d898f7..90380ca98 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -43,6 +40,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) ReplicaSetExpansion } // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.ReplicaSet, *v1beta2.ReplicaSetList, *appsv1beta2.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.ReplicaSet { return &v1beta2.ReplicaSet{} }, + func() *v1beta2.ReplicaSetList { return &v1beta2.ReplicaSetList{} }), } } - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 0416675d6..f2d673abb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -43,6 +42,7 @@ type StatefulSetsGetter interface { type StatefulSetInterface interface { Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type StatefulSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) @@ -61,209 +62,27 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta2.StatefulSet, *v1beta2.StatefulSetList, *appsv1beta2.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta2.StatefulSet { return &v1beta2.StatefulSet{} }, + func() *v1beta2.StatefulSetList { return &v1beta2.StatefulSetList{} }), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
-func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { result = &v1beta2.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s } result = &v1beta2.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index aea9d0e13..81be8b2e0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -28,6 +28,7 @@ import ( type AuthenticationV1Interface interface { RESTClient() rest.Interface + SelfSubjectReviewsGetter TokenReviewsGetter } @@ -36,6 +37,10 @@ type AuthenticationV1Client struct { restClient rest.Interface } +func (c *AuthenticationV1Client) SelfSubjectReviews() SelfSubjectReviewInterface { + return newSelfSubjectReviews(c) +} + func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { return newTokenReviews(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go index 0413fb2b6..35f2c22b4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -18,4 +18,6 @@ limitations under the License. package v1 +type SelfSubjectReviewExpansion interface{} + type TokenReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go new file mode 100644 index 000000000..720dd9e7e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. +// A group's client should implement this interface. 
+type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. +type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (*v1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + *gentype.Client[*v1.SelfSubjectReview] +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + gentype.NewClient[*v1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectReview { return &v1.SelfSubjectReview{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index ca7cd47d2..52c55fab0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -41,24 +41,17 @@ type TokenReviewInterface interface { // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*v1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.TokenReview { return &v1.TokenReview{} }), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { - result = &v1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go new file mode 100644 index 000000000..187392661 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+
+package v1alpha1
+
+import (
+	"net/http"
+
+	v1alpha1 "k8s.io/api/authentication/v1alpha1"
+	"k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+type AuthenticationV1alpha1Interface interface {
+	RESTClient() rest.Interface
+	SelfSubjectReviewsGetter
+}
+
+// AuthenticationV1alpha1Client is used to interact with features provided by the authentication.k8s.io group.
+type AuthenticationV1alpha1Client struct {
+	restClient rest.Interface
+}
+
+func (c *AuthenticationV1alpha1Client) SelfSubjectReviews() SelfSubjectReviewInterface {
+	return newSelfSubjectReviews(c)
+}
+
+// NewForConfig creates a new AuthenticationV1alpha1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*AuthenticationV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	httpClient, err := rest.HTTPClientFor(&config)
+	if err != nil {
+		return nil, err
+	}
+	return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AuthenticationV1alpha1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AuthenticationV1alpha1Client, error) {
+	config := *c
+	if err := setConfigDefaults(&config); err != nil {
+		return nil, err
+	}
+	client, err := rest.RESTClientForConfigAndClient(&config, h)
+	if err != nil {
+		return nil, err
+	}
+	return &AuthenticationV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AuthenticationV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AuthenticationV1alpha1Client {
+	client, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return client
+}
+
+// New creates a new AuthenticationV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *AuthenticationV1alpha1Client {
+	return &AuthenticationV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+	gv := v1alpha1.SchemeGroupVersion
+	config.GroupVersion = &gv
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
+	return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *AuthenticationV1alpha1Client) RESTClient() rest.Interface {
+	if c == nil {
+		return nil
+	}
+	return c.restClient
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/doc.go
new file mode 100644
index 000000000..df51baa4d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/generated_expansion.go
new file mode 100644
index 000000000..6a648364b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type SelfSubjectReviewExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
new file mode 100644
index 000000000..f034bcdbe
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go
@@ -0,0 +1,57 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	"context"
+
+	v1alpha1 "k8s.io/api/authentication/v1alpha1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	gentype "k8s.io/client-go/gentype"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface.
+// A group's client should implement this interface.
+type SelfSubjectReviewsGetter interface {
+	SelfSubjectReviews() SelfSubjectReviewInterface
+}
+
+// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources.
+type SelfSubjectReviewInterface interface {
+	Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*v1alpha1.SelfSubjectReview, error)
+	SelfSubjectReviewExpansion
+}
+
+// selfSubjectReviews implements SelfSubjectReviewInterface
+type selfSubjectReviews struct {
+	*gentype.Client[*v1alpha1.SelfSubjectReview]
+}
+
+// newSelfSubjectReviews returns a SelfSubjectReviews
+func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews {
+	return &selfSubjectReviews{
+		gentype.NewClient[*v1alpha1.SelfSubjectReview](
+			"selfsubjectreviews",
+			c.RESTClient(),
+			scheme.ParameterCodec,
+			"",
+			func() *v1alpha1.SelfSubjectReview { return &v1alpha1.SelfSubjectReview{} }),
+	}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
index 218cb60c3..7823729e0 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
@@ -28,6 +28,7 @@ import (
 
 type AuthenticationV1beta1Interface interface {
 	RESTClient() rest.Interface
+	SelfSubjectReviewsGetter
 	TokenReviewsGetter
 }
 
@@ -36,6 +37,10 @@ type AuthenticationV1beta1Client struct {
 	restClient rest.Interface
 }
 
+func (c *AuthenticationV1beta1Client) SelfSubjectReviews() SelfSubjectReviewInterface {
+	return newSelfSubjectReviews(c)
+}
+
 func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface {
 	return newTokenReviews(c)
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
index 60bf15ab9..527a458d7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
@@ -18,4 +18,6 @@ limitations under the License.
 
 package v1beta1
 
+type SelfSubjectReviewExpansion interface{}
+
 type TokenReviewExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
new file mode 100644
index 000000000..d083ba8fa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go
@@ -0,0 +1,57 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"context"
+
+	v1beta1 "k8s.io/api/authentication/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	gentype "k8s.io/client-go/gentype"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface.
+// A group's client should implement this interface.
+type SelfSubjectReviewsGetter interface { + SelfSubjectReviews() SelfSubjectReviewInterface +} + +// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. +type SelfSubjectReviewInterface interface { + Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error) + SelfSubjectReviewExpansion +} + +// selfSubjectReviews implements SelfSubjectReviewInterface +type selfSubjectReviews struct { + *gentype.Client[*v1beta1.SelfSubjectReview] +} + +// newSelfSubjectReviews returns a SelfSubjectReviews +func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { + return &selfSubjectReviews{ + gentype.NewClient[*v1beta1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectReview { return &v1beta1.SelfSubjectReview{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 5da122433..982534935 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -41,24 +41,17 @@ type TokenReviewInterface interface { // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.TokenReview { return &v1beta1.TokenReview{} }), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { - result = &v1beta1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index 84b2efe16..3d058941a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. 
@@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface { // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*v1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.LocalSubjectAccessReview { return &v1.LocalSubjectAccessReview{} }), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { - result = &v1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index 2006196c1..9e874bee5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface { // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectAccessReview { return &v1.SelfSubjectAccessReview{} }), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { - result = &v1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index 25d99f7b5..567b63ec4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface { // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*v1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SelfSubjectRulesReview { return &v1.SelfSubjectRulesReview{} }), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { - result = &v1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 8ac0566a2..52e8d74e5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface { // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.SubjectAccessReview { return &v1.SubjectAccessReview{} }), } } - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. 
-func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { - result = &v1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index 78584ba94..302c094b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. @@ -41,27 +41,17 @@ type LocalSubjectAccessReviewInterface interface { // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*v1beta1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1beta1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.LocalSubjectAccessReview { return &v1beta1.LocalSubjectAccessReview{} }), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { - result = &v1beta1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 0286c93fe..4b413dc4f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. 
@@ -41,24 +41,17 @@ type SelfSubjectAccessReviewInterface interface { // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectAccessReview { return &v1beta1.SelfSubjectAccessReview{} }), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { - result = &v1beta1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index d772973ec..b64cec301 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. @@ -41,24 +41,17 @@ type SelfSubjectRulesReviewInterface interface { // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SelfSubjectRulesReview { return &v1beta1.SelfSubjectRulesReview{} }), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { - result = &v1beta1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index aebe8398c..3fca833a1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -23,8 +23,8 @@ import ( v1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -41,24 +41,17 @@ type SubjectAccessReviewInterface interface { // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*v1beta1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*v1beta1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.SubjectAccessReview { return &v1beta1.SubjectAccessReview{} }), } } - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { - result = &v1beta1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 19afde66d..4d29ac522 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.HorizontalPodAutoscaler, *v1.HorizontalPodAutoscalerList, *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.HorizontalPodAutoscaler { return &v1.HorizontalPodAutoscaler{} }, + func() *v1.HorizontalPodAutoscalerList { return &v1.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go index 3a077d71d..dbce8d102 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2 import ( "context" - json "encoding/json" - "fmt" - "time" v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. 
@@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2.HorizontalPodAutoscaler, *v2.HorizontalPodAutoscalerList, *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2.HorizontalPodAutoscaler { return &v2.HorizontalPodAutoscaler{} }, + func() *v2.HorizontalPodAutoscalerList { return &v2.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 5080912a1..6bc1b7776 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2beta1.HorizontalPodAutoscaler, *v2beta1.HorizontalPodAutoscalerList, *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2beta1.HorizontalPodAutoscaler { return &v2beta1.HorizontalPodAutoscaler{} }, + func() *v2beta1.HorizontalPodAutoscalerList { return &v2beta1.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 0ddb9108b..6f464661a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -20,17 +20,14 @@ package v2beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -43,6 +40,7 @@ type HorizontalPodAutoscalersGetter interface { type HorizontalPodAutoscalerInterface interface { Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type HorizontalPodAutoscalerInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v2beta2.HorizontalPodAutoscaler, *v2beta2.HorizontalPodAutoscalerList, *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v2beta2.HorizontalPodAutoscaler { return &v2beta2.HorizontalPodAutoscaler{} }, + func() *v2beta2.HorizontalPodAutoscalerList { return &v2beta2.HorizontalPodAutoscalerList{} }), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go index 925026321..7907a5bf5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. 
@@ -43,6 +40,7 @@ type CronJobsGetter interface { type CronJobInterface interface { Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type CronJobInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.CronJob, *v1.CronJobList, *batchv1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.CronJob { return &v1.CronJob{} }, + func() *v1.CronJobList { return &v1.CronJobList{} }), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. 
-func (c *cronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index c076c80af..83dbe6fa4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // JobsGetter has a method to return a JobInterface. @@ -43,6 +40,7 @@ type JobsGetter interface { type JobInterface interface { Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type JobInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) JobExpansion } // jobs implements JobInterface type jobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration] } // newJobs returns a Jobs func newJobs(c *BatchV1Client, namespace string) *jobs { return &jobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Job, *v1.JobList, *batchv1.JobApplyConfiguration]( + "jobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Job { return &v1.Job{} }, + func() *v1.JobList { return &v1.JobList{} }), } } - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. 
-func (c *jobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched job. 
-func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied job. -func (c *jobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *jobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index d687339ae..a6f7399d8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. @@ -43,6 +40,7 @@ type CronJobsGetter interface { type CronJobInterface interface { Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type CronJobInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.CronJob, *v1beta1.CronJobList, *batchv1beta1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.CronJob { return &v1beta1.CronJob{} }, + func() *v1beta1.CronJobList { return &v1beta1.CronJobList{} }), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go index 0d6b68b29..9fa3300e6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface { type CertificateSigningRequestInterface interface { Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (*v1.CertificateSigningRequest, error) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,6 +49,7 @@ type CertificateSigningRequestInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) @@ -59,195 +58,26 @@ type CertificateSigningRequestInterface interface { // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CertificateSigningRequest, *v1.CertificateSigningRequestList, *certificatesv1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CertificateSigningRequest { return &v1.CertificateSigningRequest{} }, + func() *v1.CertificateSigningRequestList { return &v1.CertificateSigningRequestList{} }), } } -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // UpdateApproval takes the top resource name and the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { result = &v1.CertificateSigningRequest{} - err = c.client.Put(). + err = c.GetClient().Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequestName). SubResource("approval"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go new file mode 100644 index 000000000..a9050af94 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterTrustBundlesGetter +} + +// CertificatesV1alpha1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1alpha1Client) ClusterTrustBundles() ClusterTrustBundleInterface { + return newClusterTrustBundles(c) +} + +// NewForConfig creates a new CertificatesV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CertificatesV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1alpha1Client { + return &CertificatesV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go new file mode 100644 index 000000000..74fe9fa14 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. +// A group's client should implement this interface. +type ClusterTrustBundlesGetter interface { + ClusterTrustBundles() ClusterTrustBundleInterface +} + +// ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources. +type ClusterTrustBundleInterface interface { + Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error) + Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) + Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) + ClusterTrustBundleExpansion +} + +// clusterTrustBundles implements ClusterTrustBundleInterface +type clusterTrustBundles struct { + *gentype.ClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration] +} + +// newClusterTrustBundles returns a ClusterTrustBundles +func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { + return &clusterTrustBundles{ + gentype.NewClientWithListAndApply[*v1alpha1.ClusterTrustBundle, *v1alpha1.ClusterTrustBundleList, *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration]( + "clustertrustbundles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterTrustBundle { return &v1alpha1.ClusterTrustBundle{} }, + func() *v1alpha1.ClusterTrustBundleList { return &v1alpha1.ClusterTrustBundleList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..43cc534b3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterTrustBundleExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index ec0b9d266..de9915c5d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -43,6 +40,7 @@ type CertificateSigningRequestsGetter interface { type CertificateSigningRequestInterface interface { Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type CertificateSigningRequestInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) CertificateSigningRequestExpansion } // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CertificateSigningRequest, *v1beta1.CertificateSigningRequestList, *certificatesv1beta1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CertificateSigningRequest { return &v1beta1.CertificateSigningRequest{} }, + func() *v1beta1.CertificateSigningRequestList { return &v1beta1.CertificateSigningRequestList{} }), } } - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. 
-func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. 
-func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. -func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go index 473789141..4e631b0a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -30,7 +30,7 @@ type CertificateSigningRequestExpansion interface { func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) { result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). + err = c.GetClient().Put(). Resource("certificatesigningrequests"). 
Name(certificateSigningRequest.Name). VersionedParams(&opts, scheme.ParameterCodec). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go index 9e6b169a8..97834d6ac 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. @@ -55,154 +52,18 @@ type LeaseInterface interface { // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Lease, *v1.LeaseList, *coordinationv1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Lease { return &v1.Lease{} }, + func() *v1.LeaseList { return &v1.LeaseList{} }), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. 
Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *leases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go new file mode 100644 index 000000000..dd75e5d01 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/coordination_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1alpha1Interface interface { + RESTClient() rest.Interface + LeaseCandidatesGetter +} + +// CoordinationV1alpha1Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1alpha1Client) LeaseCandidates(namespace string) LeaseCandidateInterface { + return newLeaseCandidates(c, namespace) +} + +// NewForConfig creates a new CoordinationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CoordinationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoordinationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CoordinationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1alpha1Client { + return &CoordinationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoordinationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..2dc2f30cf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type LeaseCandidateExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go new file mode 100644 index 000000000..868185135 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha1/leasecandidate.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/coordination/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + coordinationv1alpha1 "k8s.io/client-go/applyconfigurations/coordination/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// LeaseCandidatesGetter has a method to return a LeaseCandidateInterface. +// A group's client should implement this interface. +type LeaseCandidatesGetter interface { + LeaseCandidates(namespace string) LeaseCandidateInterface +} + +// LeaseCandidateInterface has methods to work with LeaseCandidate resources. 
+type LeaseCandidateInterface interface { + Create(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.CreateOptions) (*v1alpha1.LeaseCandidate, error) + Update(ctx context.Context, leaseCandidate *v1alpha1.LeaseCandidate, opts v1.UpdateOptions) (*v1alpha1.LeaseCandidate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.LeaseCandidate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.LeaseCandidateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.LeaseCandidate, err error) + Apply(ctx context.Context, leaseCandidate *coordinationv1alpha1.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.LeaseCandidate, err error) + LeaseCandidateExpansion +} + +// leaseCandidates implements LeaseCandidateInterface +type leaseCandidates struct { + *gentype.ClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration] +} + +// newLeaseCandidates returns a LeaseCandidates +func newLeaseCandidates(c *CoordinationV1alpha1Client, namespace string) *leaseCandidates { + return &leaseCandidates{ + gentype.NewClientWithListAndApply[*v1alpha1.LeaseCandidate, *v1alpha1.LeaseCandidateList, *coordinationv1alpha1.LeaseCandidateApplyConfiguration]( + "leasecandidates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.LeaseCandidate { return &v1alpha1.LeaseCandidate{} }, + func() *v1alpha1.LeaseCandidateList { return &v1alpha1.LeaseCandidateList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 1bbd57bdd..62341e53b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. 
@@ -55,154 +52,18 @@ type LeaseInterface interface { // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Lease, *v1beta1.LeaseList, *coordinationv1beta1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Lease { return &v1beta1.Lease{} }, + func() *v1beta1.LeaseList { return &v1beta1.LeaseList{} }), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *leases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *leases) Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1beta1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 0fef56429..ab9458a5c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ComponentStatusesGetter has a method to return a ComponentStatusInterface. @@ -55,143 +52,18 @@ type ComponentStatusInterface interface { // componentStatuses implements ComponentStatusInterface type componentStatuses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration] } // newComponentStatuses returns a ComponentStatuses func newComponentStatuses(c *CoreV1Client) *componentStatuses { return &componentStatuses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ComponentStatus, *v1.ComponentStatusList, *corev1.ComponentStatusApplyConfiguration]( + "componentstatuses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ComponentStatus { return &v1.ComponentStatus{} }, + func() *v1.ComponentStatusList { return &v1.ComponentStatusList{} }), } } - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. 
-func (c *componentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Post(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). - Name(componentStatus.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched componentStatus. 
-func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Patch(pt). - Resource("componentstatuses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus. -func (c *componentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) { - if componentStatus == nil { - return nil, fmt.Errorf("componentStatus provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(componentStatus) - if err != nil { - return nil, err - } - name := componentStatus.Name - if name == nil { - return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") - } - result = &v1.ComponentStatus{} - err = c.client.Patch(types.ApplyPatchType). - Resource("componentstatuses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index b68177720..72aa2361f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ConfigMapsGetter has a method to return a ConfigMapInterface. @@ -55,154 +52,18 @@ type ConfigMapInterface interface { // configMaps implements ConfigMapInterface type configMaps struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration] } // newConfigMaps returns a ConfigMaps func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { return &configMaps{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ConfigMap, *v1.ConfigMapList, *corev1.ConfigMapApplyConfiguration]( + "configmaps", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ConfigMap { return &v1.ConfigMap{} }, + func() *v1.ConfigMapList { return &v1.ConfigMapList{} }), } } - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. 
-func (c *configMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched configMap. -func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied configMap. 
-func (c *configMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) { - if configMap == nil { - return nil, fmt.Errorf("configMap provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(configMap) - if err != nil { - return nil, err - } - name := configMap.Name - if name == nil { - return nil, fmt.Errorf("configMap.Name must be provided to Apply") - } - result = &v1.ConfigMap{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("configmaps"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index cdf464b06..9b9fc5fc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointsGetter has a method to return a EndpointsInterface. @@ -55,154 +52,18 @@ type EndpointsInterface interface { // endpoints implements EndpointsInterface type endpoints struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration] } // newEndpoints returns a Endpoints func newEndpoints(c *CoreV1Client, namespace string) *endpoints { return &endpoints{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Endpoints, *v1.EndpointsList, *corev1.EndpointsApplyConfiguration]( + "endpoints", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Endpoints { return &v1.Endpoints{} }, + func() *v1.EndpointsList { return &v1.EndpointsList{} }), } } - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. 
-func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpoints. -func (c *endpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) { - if endpoints == nil { - return nil, fmt.Errorf("endpoints provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpoints) - if err != nil { - return nil, err - } - name := endpoints.Name - if name == nil { - return nil, fmt.Errorf("endpoints.Name must be provided to Apply") - } - result = &v1.Endpoints{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpoints"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 8274d85ff..5ff0f0690 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *CoreV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *corev1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. 
-func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index a3fdf57a9..424357232 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -48,11 +48,11 @@ type EventExpansion interface { // event; it must either match this event client's namespace, or this event // client must have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). 
@@ -67,11 +67,11 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // created with the "" namespace. Update also requires the ResourceVersion to be set in the event // object. func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -87,11 +87,11 @@ func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // match this event client's namespace, or this event client must have been // created with the "" namespace. func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { - if e.ns != "" && incompleteEvent.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + if e.GetNamespace() != "" && incompleteEvent.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). Resource("events"). Name(incompleteEvent.Name). @@ -109,8 +109,8 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev if err != nil { return nil, err } - if len(e.ns) > 0 && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + if len(e.GetNamespace()) > 0 && ref.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.GetNamespace()) } stringRefKind := string(ref.Kind) var refKind *string diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index e6883b607..f8e4048f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LimitRangesGetter has a method to return a LimitRangeInterface. 
@@ -55,154 +52,18 @@ type LimitRangeInterface interface { // limitRanges implements LimitRangeInterface type limitRanges struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration] } // newLimitRanges returns a LimitRanges func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { return &limitRanges{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.LimitRange, *v1.LimitRangeList, *corev1.LimitRangeApplyConfiguration]( + "limitranges", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.LimitRange { return &v1.LimitRange{} }, + func() *v1.LimitRangeList { return &v1.LimitRangeList{} }), } } - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). 
- Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied limitRange. -func (c *limitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) { - if limitRange == nil { - return nil, fmt.Errorf("limitRange provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(limitRange) - if err != nil { - return nil, err - } - name := limitRange.Name - if name == nil { - return nil, fmt.Errorf("limitRange.Name must be provided to Apply") - } - result = &v1.LimitRange{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("limitranges"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 06c77b4c4..75d20648f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NamespacesGetter has a method to return a NamespaceInterface. @@ -43,6 +40,7 @@ type NamespacesGetter interface { type NamespaceInterface interface { Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) @@ -50,178 +48,25 @@ type NamespaceInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) NamespaceExpansion } // namespaces implements NamespaceInterface type namespaces struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration] } // newNamespaces returns a Namespaces func newNamespaces(c *CoreV1Client) *namespaces { return &namespaces{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Namespace, *v1.NamespaceList, *corev1.NamespaceApplyConfiguration]( + "namespaces", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Namespace { return &v1.Namespace{} }, + func() *v1.NamespaceList { return &v1.NamespaceList{} }), } } - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. 
Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied namespace. -func (c *namespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *namespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go index be1116db1..4f720fb92 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -32,6 +32,6 @@ type NamespaceExpansion interface { // Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. func (c *namespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) + err = c.GetClient().Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) return } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index d9725b2f9..df1a7817f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NodesGetter has a method to return a NodeInterface. @@ -43,6 +40,7 @@ type NodesGetter interface { type NodeInterface interface { Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type NodeInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) NodeExpansion } // nodes implements NodeInterface type nodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration] } // newNodes returns a Nodes func newNodes(c *CoreV1Client) *nodes { return &nodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.Node, *v1.NodeList, *corev1.NodeApplyConfiguration]( + "nodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.Node { return &v1.Node{} }, + func() *v1.NodeList { return &v1.NodeList{} }), } } - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Post(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. 
-func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched node. -func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied node. -func (c *nodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *nodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go index bdf7bfed8..df86253b0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -34,7 +34,7 @@ type NodeExpansion interface { // the node that the server returns, or an error. func (c *nodes) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) { result := &v1.Node{} - err := c.client.Patch(types.StrategicMergePatchType). + err := c.GetClient().Patch(types.StrategicMergePatchType). Resource("nodes"). Name(nodeName). SubResource("status"). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index a8e229597..8be40f866 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumesGetter has a method to return a PersistentVolumeInterface. @@ -43,6 +40,7 @@ type PersistentVolumesGetter interface { type PersistentVolumeInterface interface { Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type PersistentVolumeInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) PersistentVolumeExpansion } // persistentVolumes implements PersistentVolumeInterface type persistentVolumes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration] } // newPersistentVolumes returns a PersistentVolumes func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { return &persistentVolumes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PersistentVolume, *v1.PersistentVolumeList, *corev1.PersistentVolumeApplyConfiguration]( + "persistentvolumes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PersistentVolume { return &v1.PersistentVolume{} }, + func() *v1.PersistentVolumeList { return &v1.PersistentVolumeList{} }), } } - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOpts, scheme.ParameterCodec). 
- Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Patch(pt). - Resource("persistentvolumes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume. -func (c *persistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *persistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 2e7f4fb44..7721b0092 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. 
@@ -43,6 +40,7 @@ type PersistentVolumeClaimsGetter interface { type PersistentVolumeClaimInterface interface { Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type PersistentVolumeClaimInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } // persistentVolumeClaims implements PersistentVolumeClaimInterface type persistentVolumeClaims struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration] } // newPersistentVolumeClaims returns a PersistentVolumeClaims func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { return &persistentVolumeClaims{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PersistentVolumeClaim, *v1.PersistentVolumeClaimList, *corev1.PersistentVolumeClaimApplyConfiguration]( + "persistentvolumeclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{} }, + func() *v1.PersistentVolumeClaimList { return &v1.PersistentVolumeClaimList{} }), } } - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. 
-func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim. -func (c *persistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *persistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 63122cf3f..470b7de7b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodsGetter has a method to return a PodInterface. 
@@ -43,6 +40,7 @@ type PodsGetter interface { type PodInterface interface { Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,6 +49,7 @@ type PodInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) @@ -59,209 +58,27 @@ type PodInterface interface { // pods implements PodInterface type pods struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration] } // newPods returns a Pods func newPods(c *CoreV1Client, namespace string) *pods { return &pods{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Pod, *v1.PodList, *corev1.PodApplyConfiguration]( + "pods", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Pod { return &v1.Pod{} }, + func() *v1.PodList { return &v1.PodList{} }), } } -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. 
-func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pod. -func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pods"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied pod. -func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("pods"). Name(podName). SubResource("ephemeralcontainers"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go index 8b6e0e932..a2d4d70d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -47,33 +47,33 @@ type PodExpansion interface { // Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() } // Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource. // Equivalent to calling EvictV1beta1. // Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1(). 
func (c *pods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } // Get constructs a request for getting the logs for a pod func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) + return c.GetClient().Get().Namespace(c.GetNamespace()).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) } // ProxyGet returns a response of the pod by calling it through the proxy. func (c *pods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("pods"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index ff90fc0e6..060a05909 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodTemplatesGetter has a method to return a PodTemplateInterface. 
@@ -55,154 +52,18 @@ type PodTemplateInterface interface { // podTemplates implements PodTemplateInterface type podTemplates struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration] } // newPodTemplates returns a PodTemplates func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { return &podTemplates{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PodTemplate, *v1.PodTemplateList, *corev1.PodTemplateApplyConfiguration]( + "podtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PodTemplate { return &v1.PodTemplate{} }, + func() *v1.PodTemplateList { return &v1.PodTemplateList{} }), } } - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). 
- Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate. -func (c *podTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) { - if podTemplate == nil { - return nil, fmt.Errorf("podTemplate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podTemplate) - if err != nil { - return nil, err - } - name := podTemplate.Name - if name == nil { - return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") - } - result = &v1.PodTemplate{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podtemplates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 49c75d967..9b275ed1b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -20,9 +20,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" @@ -30,8 +27,8 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicationControllersGetter has a method to return a ReplicationControllerInterface. @@ -44,6 +41,7 @@ type ReplicationControllersGetter interface { type ReplicationControllerInterface interface { Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -52,6 +50,7 @@ type ReplicationControllerInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -61,209 +60,27 @@ type ReplicationControllerInterface interface { // replicationControllers implements ReplicationControllerInterface type replicationControllers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration] } // newReplicationControllers returns a ReplicationControllers func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { return &replicationControllers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ReplicationController, *v1.ReplicationControllerList, *corev1.ReplicationControllerApplyConfiguration]( + "replicationcontrollers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ReplicationController { return &v1.ReplicationController{} }, + func() *v1.ReplicationControllerList { return &v1.ReplicationControllerList{} }), } } -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *replicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. 
-func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicationController. -func (c *replicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). @@ -276,8 +93,8 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 8444d164e..4b2dcd3b5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ResourceQuotasGetter has a method to return a ResourceQuotaInterface. @@ -43,6 +40,7 @@ type ResourceQuotasGetter interface { type ResourceQuotaInterface interface { Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type ResourceQuotaInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) ResourceQuotaExpansion } // resourceQuotas implements ResourceQuotaInterface type resourceQuotas struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration] } // newResourceQuotas returns a ResourceQuotas func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { return &resourceQuotas{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ResourceQuota, *v1.ResourceQuotaList, *corev1.ResourceQuotaApplyConfiguration]( + "resourcequotas", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ResourceQuota { return &v1.ResourceQuota{} }, + func() *v1.ResourceQuotaList { return &v1.ResourceQuotaList{} }), } } - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. 
-func (c *resourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceQuota. 
-func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota. -func (c *resourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 4aba33038..12a8d1178 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SecretsGetter has a method to return a SecretInterface. 
@@ -55,154 +52,18 @@ type SecretInterface interface { // secrets implements SecretInterface type secrets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration] } // newSecrets returns a Secrets func newSecrets(c *CoreV1Client, namespace string) *secrets { return &secrets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Secret, *v1.SecretList, *corev1.SecretApplyConfiguration]( + "secrets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Secret { return &v1.Secret{} }, + func() *v1.SecretList { return &v1.SecretList{} }), } } - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("secrets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied secret. -func (c *secrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) { - if secret == nil { - return nil, fmt.Errorf("secret provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(secret) - if err != nil { - return nil, err - } - name := secret.Name - if name == nil { - return nil, fmt.Errorf("secret.Name must be provided to Apply") - } - result = &v1.Secret{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("secrets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 3fe22ba44..ec935a324 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServicesGetter has a method to return a ServiceInterface. @@ -43,6 +40,7 @@ type ServicesGetter interface { type ServiceInterface interface { Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) @@ -50,190 +48,25 @@ type ServiceInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) ServiceExpansion } // services implements ServiceInterface type services struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration] } // newServices returns a Services func newServices(c *CoreV1Client, namespace string) *services { return &services{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Service, *v1.ServiceList, *corev1.ServiceApplyConfiguration]( + "services", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Service { return &v1.Service{} }, + func() *v1.ServiceList { return &v1.ServiceList{} }), } } - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a service and updates it. 
Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied service. -func (c *services) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *services) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go index 4937fd1a3..9a6f78138 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go @@ -28,8 +28,8 @@ type ServiceExpansion interface { // ProxyGet returns a response of the service by calling it through the proxy. func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("services"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index bdf589b96..eb995d454 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -20,9 +20,6 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" @@ -30,8 +27,8 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServiceAccountsGetter has a method to return a ServiceAccountInterface. @@ -58,163 +55,27 @@ type ServiceAccountInterface interface { // serviceAccounts implements ServiceAccountInterface type serviceAccounts struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration] } // newServiceAccounts returns a ServiceAccounts func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { return &serviceAccounts{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.ServiceAccount, *v1.ServiceAccountList, *corev1.ServiceAccountApplyConfiguration]( + "serviceaccounts", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.ServiceAccount { return &v1.ServiceAccount{} }, + func() *v1.ServiceAccountList { return &v1.ServiceAccountList{} }), } } -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount. 
-func (c *serviceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) { - if serviceAccount == nil { - return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceAccount) - if err != nil { - return nil, err - } - name := serviceAccount.Name - if name == nil { - return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") - } - result = &v1.ServiceAccount{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { result = &authenticationv1.TokenRequest{} - err = c.client.Post(). - Namespace(c.ns). + err = c.GetClient().Post(). + Namespace(c.GetNamespace()). Resource("serviceaccounts"). Name(serviceAccountName). SubResource("token"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go index 63e616b03..1f927055c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -55,154 +52,18 @@ type EndpointSliceInterface interface { // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.EndpointSlice, *v1.EndpointSliceList, *discoveryv1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.EndpointSlice { return &v1.EndpointSlice{} }, + func() *v1.EndpointSliceList { return &v1.EndpointSliceList{} }), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *endpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go index 2ade83302..298cfbc87 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -55,154 +52,18 @@ type EndpointSliceInterface interface { // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.EndpointSlice, *v1beta1.EndpointSliceList, *discoveryv1beta1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.EndpointSlice { return &v1beta1.EndpointSlice{} }, + func() *v1beta1.EndpointSliceList { return &v1beta1.EndpointSliceList{} }), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. 
-func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. 
-func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go index c9f2bbed5..d021a76c4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Event, *v1.EventList, *eventsv1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Event { return &v1.Event{} }, + func() *v1.EventList { return &v1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index dfdf8b897..77ca2e775 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -55,154 +52,18 @@ type EventInterface interface { // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1beta1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Event, *v1beta1.EventList, *eventsv1beta1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Event { return &v1beta1.Event{} }, + func() *v1beta1.EventList { return &v1beta1.EventList{} }), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. 
Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1beta1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 464fff911..4ddbaa31a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -44,11 +44,11 @@ type EventExpansion interface { // it must either match this event client's namespace, or this event client must // have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). 
NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). @@ -64,11 +64,11 @@ func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // created with the "" namespace. // Update also requires the ResourceVersion to be set in the event object. func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -82,14 +82,13 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // It returns the copy of the event that the server returns, or an error. // The namespace and name of the target event is deduced from the event. // The namespace must either match this event client's namespace, or this event client must -// -// have been created with the "" namespace. +// have been created with the "" namespace. func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index ffe219fda..f86194bf0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -43,6 +40,7 @@ type DaemonSetsGetter interface { type DaemonSetInterface interface { Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type DaemonSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.DaemonSet, *v1beta1.DaemonSetList, *extensionsv1beta1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.DaemonSet { return &v1beta1.DaemonSet{} }, + func() *v1beta1.DaemonSetList { return &v1beta1.DaemonSetList{} }), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. 
-func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). 
- Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index c41d8dbc2..021fbb3b3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,6 +42,7 @@ type DeploymentsGetter interface { type DeploymentInterface interface { Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type DeploymentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) @@ -61,209 +62,27 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Deployment, *v1beta1.DeploymentList, *extensionsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Deployment { return &v1beta1.Deployment{} }, + func() *v1beta1.DeploymentList { return &v1beta1.DeploymentList{} }), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *deployments) ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca } result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go index 5c409ac99..bd75b8a38 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -31,5 +31,5 @@ type DeploymentExpansion interface { // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. 
func (c *deployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 827b514df..4725d2cd1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsV1beta1Interface interface { DeploymentsGetter IngressesGetter NetworkPoliciesGetter - PodSecurityPoliciesGetter ReplicaSetsGetter } @@ -57,10 +56,6 @@ func (c *ExtensionsV1beta1Client) NetworkPolicies(namespace string) NetworkPolic return newNetworkPolicies(c, namespace) } -func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index 41d28f041..67fcf4992 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -24,6 +24,4 @@ type IngressExpansion interface{} type NetworkPolicyExpansion interface{} -type PodSecurityPolicyExpansion interface{} - type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index dd4012cc2..4511c93fc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *extensionsv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *ingresses) ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index 978b26db0..afa8203c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. @@ -55,154 +52,18 @@ type NetworkPolicyInterface interface { // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.NetworkPolicy, *v1beta1.NetworkPolicyList, *extensionsv1beta1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.NetworkPolicy { return &v1beta1.NetworkPolicy{} }, + func() *v1beta1.NetworkPolicyList { return &v1beta1.NetworkPolicyList{} }), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. 
-func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 3f38c3133..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
-type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *extensionsv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 3c907a3a0..8973948f3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -22,15 +22,14 @@ import ( "context" json "encoding/json" "fmt" - "time" v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -43,6 +42,7 @@ type ReplicaSetsGetter interface { type ReplicaSetInterface interface { Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,6 +51,7 @@ type ReplicaSetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) @@ -61,209 +62,27 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.ReplicaSet, *v1beta1.ReplicaSetList, *extensionsv1beta1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.ReplicaSet { return &v1beta1.ReplicaSet{} }, + func() *v1beta1.ReplicaSetList { return &v1beta1.ReplicaSetList{} }), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. 
-func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *replicaSets) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -276,8 +95,8 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -301,8 +120,8 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca } result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = c.GetClient().Patch(types.ApplyPatchType). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go index c6f2d9405..3d7d93ef1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go @@ -16,39 +16,39 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "net/http" - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/api/flowcontrol/v1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type FlowcontrolV1alpha1Interface interface { +type FlowcontrolV1Interface interface { RESTClient() rest.Interface FlowSchemasGetter PriorityLevelConfigurationsGetter } -// FlowcontrolV1alpha1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. -type FlowcontrolV1alpha1Client struct { +// FlowcontrolV1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. 
+type FlowcontrolV1Client struct { restClient rest.Interface } -func (c *FlowcontrolV1alpha1Client) FlowSchemas() FlowSchemaInterface { +func (c *FlowcontrolV1Client) FlowSchemas() FlowSchemaInterface { return newFlowSchemas(c) } -func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { +func (c *FlowcontrolV1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { return newPriorityLevelConfigurations(c) } -// NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. +// NewForConfig creates a new FlowcontrolV1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { +func NewForConfig(c *rest.Config) (*FlowcontrolV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -60,9 +60,9 @@ func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new FlowcontrolV1alpha1Client for the given config and http client. +// NewForConfigAndClient creates a new FlowcontrolV1Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -71,12 +71,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1 if err != nil { return nil, err } - return &FlowcontrolV1alpha1Client{client}, nil + return &FlowcontrolV1Client{client}, nil } -// NewForConfigOrDie creates a new FlowcontrolV1alpha1Client for the given config and +// NewForConfigOrDie creates a new FlowcontrolV1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -84,13 +84,13 @@ func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { return client } -// New creates a new FlowcontrolV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *FlowcontrolV1alpha1Client { - return &FlowcontrolV1alpha1Client{c} +// New creates a new FlowcontrolV1Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1Client { + return &FlowcontrolV1Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -104,7 +104,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *FlowcontrolV1alpha1Client) RESTClient() rest.Interface { +func (c *FlowcontrolV1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go new file mode 100644 index 000000000..2606cee07 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FlowSchema, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.FlowSchemaList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + *gentype.ClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration] +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { + return &flowSchemas{ + gentype.NewClientWithListAndApply[*v1.FlowSchema, *v1.FlowSchemaList, *flowcontrolv1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.FlowSchema { return &v1.FlowSchema{} }, + func() *v1.FlowSchemaList { return &v1.FlowSchemaList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go index 065b5e6b4..990677388 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 type FlowSchemaExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go new file mode 100644 index 000000000..64907af60 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/flowcontrol/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. +type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
+type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + *gentype.ClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration] +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + gentype.NewClientWithListAndApply[*v1.PriorityLevelConfiguration, *v1.PriorityLevelConfigurationList, *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PriorityLevelConfiguration { return &v1.PriorityLevelConfiguration{} }, + func() *v1.PriorityLevelConfigurationList { return &v1.PriorityLevelConfigurationList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go deleted file mode 100644 index 95baf8251..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// FlowSchemasGetter has a method to return a FlowSchemaInterface. -// A group's client should implement this interface. -type FlowSchemasGetter interface { - FlowSchemas() FlowSchemaInterface -} - -// FlowSchemaInterface has methods to work with FlowSchema resources. -type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (*v1alpha1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) - FlowSchemaExpansion -} - -// flowSchemas implements FlowSchemaInterface -type flowSchemas struct { - client rest.Interface -} - -// newFlowSchemas returns a FlowSchemas -func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas { - return &flowSchemas{ - client: c.RESTClient(), - } -} - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. 
-func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { - result = &v1alpha1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. 
-func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1alpha1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1alpha1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go deleted file mode 100644 index 327b727c1..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. -// A group's client should implement this interface. -type PriorityLevelConfigurationsGetter interface { - PriorityLevelConfigurations() PriorityLevelConfigurationInterface -} - -// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
-type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) - PriorityLevelConfigurationExpansion -} - -// priorityLevelConfigurations implements PriorityLevelConfigurationInterface -type priorityLevelConfigurations struct { - client rest.Interface -} - -// newPriorityLevelConfigurations returns a PriorityLevelConfigurations -func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevelConfigurations { - return &priorityLevelConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. 
-func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. 
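// The flowcontrol/v1alpha1 typed client is deleted wholesale in this vendor bump rather
// than regenerated: the client-go release vendored here no longer ships the v1alpha1
// flowcontrol API group at all. Callers still importing
// k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 have to move to one of the
// surviving group versions (the v1beta3 client added later in this diff, or the
// v1/v1beta* clients), whose generated method sets are otherwise the same.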
-func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { - result = &v1alpha1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1alpha1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1alpha1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go index a9d38becf..3c6805b9b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. 
@@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.FlowSchema, *v1beta1.FlowSchemaList, *flowcontrolv1beta1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} }, + func() *v1beta1.FlowSchemaList { return &v1beta1.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). 
- Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go index 41f35cbcc..049f4049d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.PriorityLevelConfiguration, *v1beta1.PriorityLevelConfigurationList, *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.PriorityLevelConfiguration { return &v1beta1.PriorityLevelConfiguration{} }, + func() *v1beta1.PriorityLevelConfigurationList { return &v1beta1.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
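// All of the hand-written methods deleted in these hunks (Get, List, Watch, Create, Update,
// UpdateStatus, Delete, DeleteCollection, Patch, Apply, ApplyStatus) are now supplied by the
// embedded gentype.ClientWithListAndApply value constructed above. The sketch below only
// illustrates the idea behind that generic client and is not the actual
// k8s.io/client-go/gentype implementation; resourceClient and its fields are hypothetical
// names, and the usual client-go imports (context, metav1, runtime, rest, scheme) are assumed.
type resourceClient[T runtime.Object] struct {
	rest     rest.Interface
	resource string   // e.g. "flowschemas"
	newObj   func() T // e.g. func() *v1beta1.FlowSchema { return &v1beta1.FlowSchema{} }
}

// Get is written once against the type parameter instead of once per resource type.
func (c *resourceClient[T]) Get(ctx context.Context, name string, opts metav1.GetOptions) (T, error) {
	obj := c.newObj()
	err := c.rest.Get().
		Resource(c.resource).
		Name(name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Do(ctx).
		Into(obj)
	return obj, err
}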
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go index 3a1f12b6a..270615762 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -43,6 +40,7 @@ type FlowSchemasGetter interface { type FlowSchemaInterface interface { Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type FlowSchemaInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta2.FlowSchema, *v1beta2.FlowSchemaList, *flowcontrolv1beta2.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta2.FlowSchema { return &v1beta2.FlowSchema{} }, + func() *v1beta2.FlowSchemaList { return &v1beta2.FlowSchemaList{} }), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go index f028869f1..00ead4c60 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -20,17 +20,14 @@ package v1beta2 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -43,6 +40,7 @@ type PriorityLevelConfigurationsGetter interface { type PriorityLevelConfigurationInterface interface { Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type PriorityLevelConfigurationInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta2.PriorityLevelConfiguration, *v1beta2.PriorityLevelConfigurationList, *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta2.PriorityLevelConfiguration { return &v1beta2.PriorityLevelConfiguration{} }, + func() *v1beta2.PriorityLevelConfigurationList { return &v1beta2.PriorityLevelConfigurationList{} }), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go new file mode 100644 index 000000000..09b1d25b0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta3 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go new file mode 100644 index 000000000..461120bd3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "net/http" + + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type FlowcontrolV1beta3Interface interface { + RESTClient() rest.Interface + FlowSchemasGetter + PriorityLevelConfigurationsGetter +} + +// FlowcontrolV1beta3Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1beta3Client struct { + restClient rest.Interface +} + +func (c *FlowcontrolV1beta3Client) FlowSchemas() FlowSchemaInterface { + return newFlowSchemas(c) +} + +func (c *FlowcontrolV1beta3Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { + return newPriorityLevelConfigurations(c) +} + +// NewForConfig creates a new FlowcontrolV1beta3Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*FlowcontrolV1beta3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new FlowcontrolV1beta3Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1beta3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &FlowcontrolV1beta3Client{client}, nil +} + +// NewForConfigOrDie creates a new FlowcontrolV1beta3Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1beta3Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new FlowcontrolV1beta3Client for the given RESTClient. +func New(c rest.Interface) *FlowcontrolV1beta3Client { + return &FlowcontrolV1beta3Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta3.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FlowcontrolV1beta3Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go new file mode 100644 index 000000000..35f600cdf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// FlowSchemasGetter has a method to return a FlowSchemaInterface. +// A group's client should implement this interface. +type FlowSchemasGetter interface { + FlowSchemas() FlowSchemaInterface +} + +// FlowSchemaInterface has methods to work with FlowSchema resources. +type FlowSchemaInterface interface { + Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta3.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + FlowSchemaExpansion +} + +// flowSchemas implements FlowSchemaInterface +type flowSchemas struct { + *gentype.ClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration] +} + +// newFlowSchemas returns a FlowSchemas +func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas { + return &flowSchemas{ + gentype.NewClientWithListAndApply[*v1beta3.FlowSchema, *v1beta3.FlowSchemaList, *flowcontrolv1beta3.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta3.FlowSchema { return &v1beta3.FlowSchema{} }, + func() *v1beta3.FlowSchemaList { return &v1beta3.FlowSchemaList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go new file mode 100644 index 000000000..51ae8d1f3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +type FlowSchemaExpansion interface{} + +type PriorityLevelConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go new file mode 100644 index 000000000..93842e0cf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta3 + +import ( + "context" + + v1beta3 "k8s.io/api/flowcontrol/v1beta3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. +// A group's client should implement this interface. 
+type PriorityLevelConfigurationsGetter interface { + PriorityLevelConfigurations() PriorityLevelConfigurationInterface +} + +// PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. +type PriorityLevelConfigurationInterface interface { + Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta3.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + PriorityLevelConfigurationExpansion +} + +// priorityLevelConfigurations implements PriorityLevelConfigurationInterface +type priorityLevelConfigurations struct { + *gentype.ClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration] +} + +// newPriorityLevelConfigurations returns a PriorityLevelConfigurations +func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations { + return &priorityLevelConfigurations{ + gentype.NewClientWithListAndApply[*v1beta3.PriorityLevelConfiguration, *v1beta3.PriorityLevelConfigurationList, *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta3.PriorityLevelConfiguration { return &v1beta3.PriorityLevelConfiguration{} }, + func() *v1beta3.PriorityLevelConfigurationList { return &v1beta3.PriorityLevelConfigurationList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go index 9923d6cba..afaff4912 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch 
"k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Ingress, *v1.IngressList, *networkingv1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Ingress { return &v1.Ingress{} }, + func() *v1.IngressList { return &v1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. 
-func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. 
-func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go index 16c8e48bf..3301e8799 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. @@ -55,143 +52,18 @@ type IngressClassInterface interface { // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.IngressClass, *v1.IngressClassList, *networkingv1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.IngressClass { return &v1.IngressClass{} }, + func() *v1.IngressClassList { return &v1.IngressClassList{} }), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Get(). 
- Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *ingressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index d7454ce14..ba2ef32db 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. @@ -55,154 +52,18 @@ type NetworkPolicyInterface interface { // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.NetworkPolicy, *v1.NetworkPolicyList, *networkingv1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.NetworkPolicy { return &v1.NetworkPolicy{} }, + func() *v1.NetworkPolicyList { return &v1.NetworkPolicyList{} }), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. 
-func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..df12a463d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type IPAddressExpansion interface{} + +type ServiceCIDRExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go new file mode 100644 index 000000000..33e90d18a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + *gentype.ClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration] +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { + return &iPAddresses{ + gentype.NewClientWithListAndApply[*v1alpha1.IPAddress, *v1alpha1.IPAddressList, *networkingv1alpha1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.IPAddress { return &v1alpha1.IPAddress{} }, + func() *v1alpha1.IPAddressList { return &v1alpha1.IPAddressList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go new file mode 100644 index 000000000..c730e6246 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1alpha1Interface interface { + RESTClient() rest.Interface + IPAddressesGetter + ServiceCIDRsGetter +} + +// NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + +func (c *NetworkingV1alpha1Client) ServiceCIDRs() ServiceCIDRInterface { + return newServiceCIDRs(c) +} + +// NewForConfig creates a new NetworkingV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*NetworkingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NetworkingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *NetworkingV1alpha1Client { + return &NetworkingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *NetworkingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go new file mode 100644 index 000000000..b72fe5b69 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. +// A group's client should implement this interface. +type ServiceCIDRsGetter interface { + ServiceCIDRs() ServiceCIDRInterface +} + +// ServiceCIDRInterface has methods to work with ServiceCIDR resources. +type ServiceCIDRInterface interface { + Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + ServiceCIDRExpansion +} + +// serviceCIDRs implements ServiceCIDRInterface +type serviceCIDRs struct { + *gentype.ClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration] +} + +// newServiceCIDRs returns a ServiceCIDRs +func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { + return &serviceCIDRs{ + gentype.NewClientWithListAndApply[*v1alpha1.ServiceCIDR, *v1alpha1.ServiceCIDRList, *networkingv1alpha1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ServiceCIDR { return &v1alpha1.ServiceCIDR{} }, + func() *v1alpha1.ServiceCIDRList { return &v1alpha1.ServiceCIDRList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go index f74c7257a..ac1ffbb98 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +type IPAddressExpansion interface{} + type IngressExpansion interface{} type IngressClassExpansion interface{} + +type ServiceCIDRExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index b309281af..90be275ad 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -43,6 +40,7 @@ type IngressesGetter interface { type IngressInterface interface { Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type IngressInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Ingress, *v1beta1.IngressList, *networkingv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Ingress { return &v1beta1.Ingress{} }, + func() *v1beta1.IngressList { return &v1beta1.IngressList{} }), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go index 50ccdfdbb..c55da4168 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. @@ -55,143 +52,18 @@ type IngressClassInterface interface { // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.IngressClass, *v1beta1.IngressClassList, *networkingv1beta1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.IngressClass { return &v1beta1.IngressClass{} }, + func() *v1beta1.IngressClassList { return &v1beta1.IngressClassList{} }), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Get(). - Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *ingressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. 
-func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1beta1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go new file mode 100644 index 000000000..09e4139e7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. +type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.CreateOptions) (*v1beta1.IPAddress, error) + Update(ctx context.Context, iPAddress *v1beta1.IPAddress, opts v1.UpdateOptions) (*v1beta1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *networkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + *gentype.ClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration] +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1beta1Client) *iPAddresses { + return &iPAddresses{ + gentype.NewClientWithListAndApply[*v1beta1.IPAddress, *v1beta1.IPAddressList, *networkingv1beta1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.IPAddress { return &v1beta1.IPAddress{} }, + func() *v1beta1.IPAddressList { return &v1beta1.IPAddressList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 851634ed0..d35225abd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -28,8 +28,10 @@ import ( type NetworkingV1beta1Interface interface { RESTClient() rest.Interface + IPAddressesGetter IngressesGetter IngressClassesGetter + ServiceCIDRsGetter } // NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. 
@@ -37,6 +39,10 @@ type NetworkingV1beta1Client struct { restClient rest.Interface } +func (c *NetworkingV1beta1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } @@ -45,6 +51,10 @@ func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface { return newIngressClasses(c) } +func (c *NetworkingV1beta1Client) ServiceCIDRs() ServiceCIDRInterface { + return newServiceCIDRs(c) +} + // NewForConfig creates a new NetworkingV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go new file mode 100644 index 000000000..d3336f2ec --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. +// A group's client should implement this interface. +type ServiceCIDRsGetter interface { + ServiceCIDRs() ServiceCIDRInterface +} + +// ServiceCIDRInterface has methods to work with ServiceCIDR resources. +type ServiceCIDRInterface interface { + Create(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.CreateOptions) (*v1beta1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
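With IPAddressesGetter and ServiceCIDRsGetter wired into NetworkingV1beta1Interface, the two new cluster-scoped resources become reachable from a plain clientset. A sketch, assuming the target API server actually serves networking.k8s.io/v1beta1 IPAddress objects (names are illustrative):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listIPAddresses prints every IPAddress the API server reports; for this
// resource the object name is the IP address itself.
func listIPAddresses(ctx context.Context, cs *kubernetes.Clientset) error {
	ips, err := cs.NetworkingV1beta1().IPAddresses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, ip := range ips.Items {
		fmt.Println(ip.Name)
	}
	return nil
}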
+ UpdateStatus(ctx context.Context, serviceCIDR *v1beta1.ServiceCIDR, opts v1.UpdateOptions) (*v1beta1.ServiceCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ServiceCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ServiceCIDR, err error) + ServiceCIDRExpansion +} + +// serviceCIDRs implements ServiceCIDRInterface +type serviceCIDRs struct { + *gentype.ClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration] +} + +// newServiceCIDRs returns a ServiceCIDRs +func newServiceCIDRs(c *NetworkingV1beta1Client) *serviceCIDRs { + return &serviceCIDRs{ + gentype.NewClientWithListAndApply[*v1beta1.ServiceCIDR, *v1beta1.ServiceCIDRList, *networkingv1beta1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ServiceCIDR { return &v1beta1.ServiceCIDR{} }, + func() *v1beta1.ServiceCIDRList { return &v1beta1.ServiceCIDRList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go index 5ec38b203..6c8110640 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/node/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1 "k8s.io/client-go/applyconfigurations/node/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.RuntimeClass, *v1.RuntimeClassList, *nodev1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.RuntimeClass { return &v1.RuntimeClass{} }, + func() *v1.RuntimeClassList { return &v1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. 
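ServiceCIDR carries a status subresource, which is why the generated interface above includes UpdateStatus and ApplyStatus next to the usual verbs. A small watch sketch against the new client, illustrative only:

package example

import (
	"context"
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchServiceCIDRs logs ServiceCIDR events until the watch is closed or the
// context is cancelled.
func watchServiceCIDRs(ctx context.Context, cs *kubernetes.Clientset) error {
	w, err := cs.NetworkingV1beta1().ServiceCIDRs().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		if cidr, ok := ev.Object.(*networkingv1beta1.ServiceCIDR); ok {
			fmt.Println(ev.Type, cidr.Name, cidr.Spec.CIDRs)
		}
	}
	return nil
}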
-func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). 
- SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go index 039a7ace1..60aa4a213 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.RuntimeClass, *v1alpha1.RuntimeClassList, *nodev1alpha1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.RuntimeClass { return &v1alpha1.RuntimeClass{} }, + func() *v1alpha1.RuntimeClassList { return &v1alpha1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). 
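The node/v1, node/v1alpha1 and node/v1beta1 RuntimeClass clients all collapse to the same embedding-plus-constructor shape; only the type parameters differ. Version-specific caller code is untouched, for example (illustrative):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listRuntimeClasses prints the handler configured for each RuntimeClass.
func listRuntimeClasses(ctx context.Context, cs *kubernetes.Clientset) error {
	rcs, err := cs.NodeV1().RuntimeClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, rc := range rcs.Items {
		fmt.Printf("%s -> %s\n", rc.Name, rc.Handler)
	}
	return nil
}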
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. 
-func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go index f8990adf1..8e15d5288 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -55,143 +52,18 @@ type RuntimeClassInterface interface { // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.RuntimeClass, *v1beta1.RuntimeClassList, *nodev1beta1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.RuntimeClass { return &v1beta1.RuntimeClass{} }, + func() *v1beta1.RuntimeClassList { return &v1beta1.RuntimeClassList{} }), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. 
-func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). 
- Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go index cd1aac9c2..22173d36d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1 import ( - rest "k8s.io/client-go/rest" + v1 "k8s.io/api/policy/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. @@ -35,14 +37,17 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*v1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Eviction { return &v1.Eviction{} }), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go index 853187feb..2c7e95b72 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go index 58db3acf9..6d011cbce 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface { type PodDisruptionBudgetInterface interface { Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (*v1.PodDisruptionBudget, error) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
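Because evictions now embeds gentype.Client instead of holding a bare rest.Interface, the hand-written expansion method reaches the underlying REST client through GetClient(); the exported Evict helper behaves as before. Usage sketch with illustrative names:

package example

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictPod asks the API server to evict a pod; the eviction is refused if it
// would violate a covering PodDisruptionBudget.
func evictPod(ctx context.Context, cs *kubernetes.Clientset, namespace, pod string) error {
	return cs.PolicyV1().Evictions(namespace).Evict(ctx, &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: namespace},
	})
}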
UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.PodDisruptionBudget, *v1.PodDisruptionBudgetList, *policyv1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.PodDisruptionBudget { return &v1.PodDisruptionBudget{} }, + func() *v1.PodDisruptionBudgetList { return &v1.PodDisruptionBudgetList{} }), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. 
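PodDisruptionBudget keeps Apply and ApplyStatus on its interface; the bodies removed here and just below are now provided by the generic client. A server-side apply sketch, with an illustrative field manager and label:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
	"k8s.io/client-go/kubernetes"
)

// applyPDBLabel server-side-applies a label to a PodDisruptionBudget. Apply
// still requires the object name on the apply configuration and a field
// manager in the options.
func applyPDBLabel(ctx context.Context, cs *kubernetes.Clientset, ns, name string) error {
	pdb := policyv1ac.PodDisruptionBudget(name, ns).
		WithLabels(map[string]string{"example.invalid/managed": "true"})
	_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Apply(ctx, pdb,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}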
-func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go index 12e8e76ed..e003ece6b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1beta1 import ( - rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/api/policy/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. 
@@ -35,14 +37,17 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*v1beta1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*v1beta1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Eviction { return &v1beta1.Eviction{} }), } } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go index c003671f5..d7c28987c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go index 078c16d5c..6fce70c4e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -19,5 +19,3 @@ limitations under the License. package v1beta1 type PodDisruptionBudgetExpansion interface{} - -type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 168728992..411181237 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -43,6 +40,7 @@ type PodDisruptionBudgetsGetter interface { type PodDisruptionBudgetInterface interface { Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,206 +49,25 @@ type PodDisruptionBudgetInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.PodDisruptionBudget, *v1beta1.PodDisruptionBudgetList, *policyv1beta1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.PodDisruptionBudget { return &v1beta1.PodDisruptionBudget{} }, + func() *v1beta1.PodDisruptionBudgetList { return &v1beta1.PodDisruptionBudgetList{} }), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. 
-func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 944b61de4..000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. 
-type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. -type PodSecurityPolicyInterface interface { - Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) - Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. 
-func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSecurityPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. -func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { - if podSecurityPolicy == nil { - return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSecurityPolicy) - if err != nil { - return nil, err - } - name := podSecurityPolicy.Name - if name == nil { - return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") - } - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("podsecuritypolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index 5b65c9c0a..fdb509321 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -30,7 +30,6 @@ type PolicyV1beta1Interface interface { RESTClient() rest.Interface EvictionsGetter PodDisruptionBudgetsGetter - PodSecurityPoliciesGetter } // PolicyV1beta1Client is used to interact with features provided by the policy group. @@ -46,10 +45,6 @@ func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisrupti return newPodDisruptionBudgets(c, namespace) } -func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - // NewForConfig creates a new PolicyV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 000d737f0..19fff0ee4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ClusterRole, *v1.ClusterRoleList, *rbacv1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterRole { return &v1.ClusterRole{} }, + func() *v1.ClusterRoleList { return &v1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 31db43d98..77fb3785e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.ClusterRoleBinding, *v1.ClusterRoleBindingList, *rbacv1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterRoleBinding { return &v1.ClusterRoleBinding{} }, + func() *v1.ClusterRoleBindingList { return &v1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
-func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index 93810a3ff..b75b055f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.Role, *v1.RoleList, *rbacv1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Role { return &v1.Role{} }, + func() *v1.RoleList { return &v1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. 
-func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index 2ace93860..fcbb1c0e2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. 
@@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1.RoleBinding, *v1.RoleBindingList, *rbacv1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.RoleBinding { return &v1.RoleBinding{} }, + func() *v1.RoleBindingList { return &v1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). 
- Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index d6d30e99e..f91e2c50a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. 
@@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterRole, *v1alpha1.ClusterRoleList, *rbacv1alpha1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterRole { return &v1alpha1.ClusterRole{} }, + func() *v1alpha1.ClusterRoleList { return &v1alpha1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). 
- Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 2eded92ac..3f04526f0 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. 
@@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.ClusterRoleBinding, *v1alpha1.ClusterRoleBindingList, *rbacv1alpha1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterRoleBinding { return &v1alpha1.ClusterRoleBinding{} }, + func() *v1alpha1.ClusterRoleBindingList { return &v1alpha1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 43c16fde7..4a1876a7d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. 
@@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1alpha1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.Role, *v1alpha1.RoleList, *rbacv1alpha1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.Role { return &v1alpha1.Role{} }, + func() *v1alpha1.RoleList { return &v1alpha1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1alpha1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 3129c9b4e..6473132f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.RoleBinding, *v1alpha1.RoleBindingList, *rbacv1alpha1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.RoleBinding { return &v1alpha1.RoleBinding{} }, + func() *v1alpha1.RoleBindingList { return &v1alpha1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
-func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. 
-func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index a3d67f031..ed398333a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -55,143 +52,18 @@ type ClusterRoleInterface interface { // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ClusterRole, *v1beta1.ClusterRoleList, *rbacv1beta1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ClusterRole { return &v1beta1.ClusterRole{} }, + func() *v1beta1.ClusterRoleList { return &v1beta1.ClusterRoleList{} }), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
-func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. 
-func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1beta1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index ae39cbb9a..3010a99ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -55,143 +52,18 @@ type ClusterRoleBindingInterface interface { // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.ClusterRoleBinding, *v1beta1.ClusterRoleBindingList, *rbacv1beta1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.ClusterRoleBinding { return &v1beta1.ClusterRoleBinding{} }, + func() *v1beta1.ClusterRoleBindingList { return &v1beta1.ClusterRoleBindingList{} }), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. 
-func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index e789e42fe..92e51da1b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -55,154 +52,18 @@ type RoleInterface interface { // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1beta1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.Role, *v1beta1.RoleList, *rbacv1beta1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.Role { return &v1beta1.Role{} }, + func() *v1beta1.RoleList { return &v1beta1.RoleList{} }), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. 
-func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1beta1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 1461ba3b6..ad31bd051 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -55,154 +52,18 @@ type RoleBindingInterface interface { // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.RoleBinding, *v1beta1.RoleBindingList, *rbacv1beta1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.RoleBinding { return &v1beta1.RoleBinding{} }, + func() *v1beta1.RoleBindingList { return &v1beta1.RoleBindingList{} }), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
-func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1beta1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go new file mode 100644 index 000000000..35455dfa3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// DeviceClassesGetter has a method to return a DeviceClassInterface. +// A group's client should implement this interface. +type DeviceClassesGetter interface { + DeviceClasses() DeviceClassInterface +} + +// DeviceClassInterface has methods to work with DeviceClass resources. +type DeviceClassInterface interface { + Create(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.CreateOptions) (*v1alpha3.DeviceClass, error) + Update(ctx context.Context, deviceClass *v1alpha3.DeviceClass, opts v1.UpdateOptions) (*v1alpha3.DeviceClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.DeviceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.DeviceClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.DeviceClass, err error) + Apply(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.DeviceClass, err error) + DeviceClassExpansion +} + +// deviceClasses implements DeviceClassInterface +type deviceClasses struct { + *gentype.ClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration] +} + +// newDeviceClasses returns a DeviceClasses +func newDeviceClasses(c *ResourceV1alpha3Client) *deviceClasses { + return &deviceClasses{ + gentype.NewClientWithListAndApply[*v1alpha3.DeviceClass, *v1alpha3.DeviceClassList, *resourcev1alpha3.DeviceClassApplyConfiguration]( + "deviceclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha3.DeviceClass { return &v1alpha3.DeviceClass{} }, + func() *v1alpha3.DeviceClassList { return &v1alpha3.DeviceClassList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go new file mode 100644 index 000000000..fdb23fd37 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha3 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go new file mode 100644 index 000000000..747e564b7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +type DeviceClassExpansion interface{} + +type PodSchedulingContextExpansion interface{} + +type ResourceClaimExpansion interface{} + +type ResourceClaimTemplateExpansion interface{} + +type ResourceSliceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go new file mode 100644 index 000000000..af5984321 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. +// A group's client should implement this interface. +type PodSchedulingContextsGetter interface { + PodSchedulingContexts(namespace string) PodSchedulingContextInterface +} + +// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. 
+type PodSchedulingContextInterface interface { + Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha3.PodSchedulingContext, error) + Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha3.PodSchedulingContext, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.PodSchedulingContext, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.PodSchedulingContextList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) + Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) + PodSchedulingContextExpansion +} + +// podSchedulingContexts implements PodSchedulingContextInterface +type podSchedulingContexts struct { + *gentype.ClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration] +} + +// newPodSchedulingContexts returns a PodSchedulingContexts +func newPodSchedulingContexts(c *ResourceV1alpha3Client, namespace string) *podSchedulingContexts { + return &podSchedulingContexts{ + gentype.NewClientWithListAndApply[*v1alpha3.PodSchedulingContext, *v1alpha3.PodSchedulingContextList, *resourcev1alpha3.PodSchedulingContextApplyConfiguration]( + "podschedulingcontexts", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.PodSchedulingContext { return &v1alpha3.PodSchedulingContext{} }, + func() *v1alpha3.PodSchedulingContextList { return &v1alpha3.PodSchedulingContextList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go new file mode 100644 index 000000000..879f0990d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go @@ -0,0 +1,127 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + "net/http" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type ResourceV1alpha3Interface interface { + RESTClient() rest.Interface + DeviceClassesGetter + PodSchedulingContextsGetter + ResourceClaimsGetter + ResourceClaimTemplatesGetter + ResourceSlicesGetter +} + +// ResourceV1alpha3Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1alpha3Client struct { + restClient rest.Interface +} + +func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface { + return newDeviceClasses(c) +} + +func (c *ResourceV1alpha3Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { + return newPodSchedulingContexts(c, namespace) +} + +func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface { + return newResourceClaims(c, namespace) +} + +func (c *ResourceV1alpha3Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { + return newResourceClaimTemplates(c, namespace) +} + +func (c *ResourceV1alpha3Client) ResourceSlices() ResourceSliceInterface { + return newResourceSlices(c) +} + +// NewForConfig creates a new ResourceV1alpha3Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ResourceV1alpha3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ResourceV1alpha3Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ResourceV1alpha3Client{client}, nil +} + +// NewForConfigOrDie creates a new ResourceV1alpha3Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha3Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ResourceV1alpha3Client for the given RESTClient. +func New(c rest.Interface) *ResourceV1alpha3Client { + return &ResourceV1alpha3Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha3.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *ResourceV1alpha3Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go new file mode 100644 index 000000000..2ac65c005 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimsGetter has a method to return a ResourceClaimInterface. +// A group's client should implement this interface. +type ResourceClaimsGetter interface { + ResourceClaims(namespace string) ResourceClaimInterface +} + +// ResourceClaimInterface has methods to work with ResourceClaim resources. +type ResourceClaimInterface interface { + Create(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.CreateOptions) (*v1alpha3.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, resourceClaim *v1alpha3.ResourceClaim, opts v1.UpdateOptions) (*v1alpha3.ResourceClaim, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaim, err error) + ResourceClaimExpansion +} + +// resourceClaims implements ResourceClaimInterface +type resourceClaims struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration] +} + +// newResourceClaims returns a ResourceClaims +func newResourceClaims(c *ResourceV1alpha3Client, namespace string) *resourceClaims { + return &resourceClaims{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaim, *v1alpha3.ResourceClaimList, *resourcev1alpha3.ResourceClaimApplyConfiguration]( + "resourceclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.ResourceClaim { return &v1alpha3.ResourceClaim{} }, + func() *v1alpha3.ResourceClaimList { return &v1alpha3.ResourceClaimList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go new file mode 100644 index 000000000..87997bfee --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. +// A group's client should implement this interface. +type ResourceClaimTemplatesGetter interface { + ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface +} + +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. 
+type ResourceClaimTemplateInterface interface { + Create(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha3.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *v1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha3.ResourceClaimTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceClaimTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceClaimTemplate, err error) + ResourceClaimTemplateExpansion +} + +// resourceClaimTemplates implements ResourceClaimTemplateInterface +type resourceClaimTemplates struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration] +} + +// newResourceClaimTemplates returns a ResourceClaimTemplates +func newResourceClaimTemplates(c *ResourceV1alpha3Client, namespace string) *resourceClaimTemplates { + return &resourceClaimTemplates{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceClaimTemplate, *v1alpha3.ResourceClaimTemplateList, *resourcev1alpha3.ResourceClaimTemplateApplyConfiguration]( + "resourceclaimtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha3.ResourceClaimTemplate { return &v1alpha3.ResourceClaimTemplate{} }, + func() *v1alpha3.ResourceClaimTemplateList { return &v1alpha3.ResourceClaimTemplateList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go new file mode 100644 index 000000000..081904140 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + "context" + + v1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceSlicesGetter has a method to return a ResourceSliceInterface. +// A group's client should implement this interface. 
+type ResourceSlicesGetter interface { + ResourceSlices() ResourceSliceInterface +} + +// ResourceSliceInterface has methods to work with ResourceSlice resources. +type ResourceSliceInterface interface { + Create(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.CreateOptions) (*v1alpha3.ResourceSlice, error) + Update(ctx context.Context, resourceSlice *v1alpha3.ResourceSlice, opts v1.UpdateOptions) (*v1alpha3.ResourceSlice, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha3.ResourceSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha3.ResourceSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.ResourceSlice, err error) + Apply(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.ResourceSlice, err error) + ResourceSliceExpansion +} + +// resourceSlices implements ResourceSliceInterface +type resourceSlices struct { + *gentype.ClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration] +} + +// newResourceSlices returns a ResourceSlices +func newResourceSlices(c *ResourceV1alpha3Client) *resourceSlices { + return &resourceSlices{ + gentype.NewClientWithListAndApply[*v1alpha3.ResourceSlice, *v1alpha3.ResourceSliceList, *resourcev1alpha3.ResourceSliceApplyConfiguration]( + "resourceslices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha3.ResourceSlice { return &v1alpha3.ResourceSlice{} }, + func() *v1alpha3.ResourceSliceList { return &v1alpha3.ResourceSliceList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go index c68ec5da4..a28ef2fd4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. 
@@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.PriorityClass, *v1.PriorityClassList, *schedulingv1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.PriorityClass { return &v1.PriorityClass{} }, + func() *v1.PriorityClassList { return &v1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). 
- Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index a9b8c19c7..5c78f3de9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. 
@@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.PriorityClass, *v1alpha1.PriorityClassList, *schedulingv1alpha1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.PriorityClass { return &v1alpha1.PriorityClass{} }, + func() *v1alpha1.PriorityClassList { return &v1alpha1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. 
-func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 155476e4c..9fef1d759 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. 
@@ -55,143 +52,18 @@ type PriorityClassInterface interface { // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.PriorityClass, *v1beta1.PriorityClassList, *schedulingv1beta1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.PriorityClass { return &v1beta1.PriorityClass{} }, + func() *v1beta1.PriorityClassList { return &v1beta1.PriorityClassList{} }), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. 
-func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1beta1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go index d9dc4151e..2e14db600 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. 
@@ -55,143 +52,18 @@ type CSIDriverInterface interface { // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CSIDriver, *v1.CSIDriverList, *storagev1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CSIDriver { return &v1.CSIDriver{} }, + func() *v1.CSIDriverList { return &v1.CSIDriverList{} }), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go index 17dbc8c1c..6d28d7ed1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. @@ -55,143 +52,18 @@ type CSINodeInterface interface { // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.CSINode, *v1.CSINodeList, *storagev1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.CSINode { return &v1.CSINode{} }, + func() *v1.CSINodeList { return &v1.CSINodeList{} }), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. 
-func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go new file mode 100644 index 000000000..8a762b9ff --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. +// A group's client should implement this interface. +type CSIStorageCapacitiesGetter interface { + CSIStorageCapacities(namespace string) CSIStorageCapacityInterface +} + +// CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources. 
+type CSIStorageCapacityInterface interface { + Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (*v1.CSIStorageCapacity, error) + Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (*v1.CSIStorageCapacity, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIStorageCapacity, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIStorageCapacityList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) + Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) + CSIStorageCapacityExpansion +} + +// cSIStorageCapacities implements CSIStorageCapacityInterface +type cSIStorageCapacities struct { + *gentype.ClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration] +} + +// newCSIStorageCapacities returns a CSIStorageCapacities +func newCSIStorageCapacities(c *StorageV1Client, namespace string) *cSIStorageCapacities { + return &cSIStorageCapacities{ + gentype.NewClientWithListAndApply[*v1.CSIStorageCapacity, *v1.CSIStorageCapacityList, *storagev1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.CSIStorageCapacity { return &v1.CSIStorageCapacity{} }, + func() *v1.CSIStorageCapacityList { return &v1.CSIStorageCapacityList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index af8111776..aa318d7d3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -22,6 +22,8 @@ type CSIDriverExpansion interface{} type CSINodeExpansion interface{} +type CSIStorageCapacityExpansion interface{} + type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index b31862f43..750fe8b62 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -30,6 +30,7 @@ type StorageV1Interface interface { RESTClient() rest.Interface CSIDriversGetter CSINodesGetter + CSIStorageCapacitiesGetter StorageClassesGetter VolumeAttachmentsGetter } @@ -47,6 +48,10 @@ func (c *StorageV1Client) CSINodes() CSINodeInterface { return newCSINodes(c) } +func (c *StorageV1Client) CSIStorageCapacities(namespace string) CSIStorageCapacityInterface { + return newCSIStorageCapacities(c, namespace) +} + func (c *StorageV1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 8e97d90a0..d7b6ff68a 
100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. @@ -55,143 +52,18 @@ type StorageClassInterface interface { // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.StorageClass, *v1.StorageClassList, *storagev1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.StorageClass { return &v1.StorageClass{} }, + func() *v1.StorageClassList { return &v1.StorageClassList{} }), } } - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. 
-func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go index c1dbec84f..3a0404284 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -20,17 +20,14 @@ package v1 import ( "context" - json "encoding/json" - "fmt" - "time" v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. 
@@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1.VolumeAttachment, *v1.VolumeAttachmentList, *storagev1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.VolumeAttachment { return &v1.VolumeAttachment{} }, + func() *v1.VolumeAttachmentList { return &v1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. 
-func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. 
-func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go index bf5d64ddd..6819deff6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. 
@@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1alpha1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1alpha1.CSIStorageCapacity, *v1alpha1.CSIStorageCapacityList, *storagev1alpha1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.CSIStorageCapacity { return &v1alpha1.CSIStorageCapacity{} }, + func() *v1alpha1.CSIStorageCapacityList { return &v1alpha1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. 
-func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go index 0f51c85f9..436e910f2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -21,3 +21,5 @@ package v1alpha1 type CSIStorageCapacityExpansion interface{} type VolumeAttachmentExpansion interface{} + +type VolumeAttributesClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index c9bf11d76..63e3fc243 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -30,6 +30,7 @@ type StorageV1alpha1Interface interface { RESTClient() rest.Interface CSIStorageCapacitiesGetter VolumeAttachmentsGetter + VolumeAttributesClassesGetter } // StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. @@ -45,6 +46,10 @@ func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } +func (c *StorageV1alpha1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { + return newVolumeAttributesClasses(c) +} + // NewForConfig creates a new StorageV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 58abb748f..0982d5568 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -20,17 +20,14 @@ package v1alpha1 import ( "context" - json "encoding/json" - "fmt" - "time" v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttachment, *v1alpha1.VolumeAttachmentList, *storagev1alpha1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.VolumeAttachment { return &v1alpha1.VolumeAttachment{} }, + func() *v1alpha1.VolumeAttachmentList { return &v1alpha1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. 
Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. 
-func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go new file mode 100644 index 000000000..40cff7588 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. +// A group's client should implement this interface. +type VolumeAttributesClassesGetter interface { + VolumeAttributesClasses() VolumeAttributesClassInterface +} + +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. 
+type VolumeAttributesClassInterface interface { + Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*v1alpha1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1alpha1.VolumeAttributesClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttributesClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) + VolumeAttributesClassExpansion +} + +// volumeAttributesClasses implements VolumeAttributesClassInterface +type volumeAttributesClasses struct { + *gentype.ClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration] +} + +// newVolumeAttributesClasses returns a VolumeAttributesClasses +func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { + return &volumeAttributesClasses{ + gentype.NewClientWithListAndApply[*v1alpha1.VolumeAttributesClass, *v1alpha1.VolumeAttributesClassList, *storagev1alpha1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.VolumeAttributesClass { return &v1alpha1.VolumeAttributesClass{} }, + func() *v1alpha1.VolumeAttributesClassList { return &v1alpha1.VolumeAttributesClassList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go index 04e677db0..2748919b4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. 
@@ -55,143 +52,18 @@ type CSIDriverInterface interface { // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CSIDriver, *v1beta1.CSIDriverList, *storagev1beta1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CSIDriver { return &v1beta1.CSIDriver{} }, + func() *v1beta1.CSIDriverList { return &v1beta1.CSIDriverList{} }), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1beta1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go index c3760b5ce..fe6fe228e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. @@ -55,143 +52,18 @@ type CSINodeInterface interface { // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1beta1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.CSINode, *v1beta1.CSINodeList, *storagev1beta1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.CSINode { return &v1beta1.CSINode{} }, + func() *v1beta1.CSINodeList { return &v1beta1.CSINodeList{} }), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Get(). 
- Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. 
-func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1beta1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go index 98ba936dc..e9ffc1df9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. @@ -55,154 +52,18 @@ type CSIStorageCapacityInterface interface { // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1beta1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*v1beta1.CSIStorageCapacity, *v1beta1.CSIStorageCapacityList, *storagev1beta1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1beta1.CSIStorageCapacity { return &v1beta1.CSIStorageCapacity{} }, + func() *v1beta1.CSIStorageCapacityList { return &v1beta1.CSIStorageCapacityList{} }), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. 
-func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 1a202a928..ebf78e10b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -27,3 +27,5 @@ type CSIStorageCapacityExpansion interface{} type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} + +type VolumeAttributesClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 4c7604bd2..3d1b59e36 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -33,6 +33,7 @@ type StorageV1beta1Interface interface { CSIStorageCapacitiesGetter StorageClassesGetter VolumeAttachmentsGetter + VolumeAttributesClassesGetter } // StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. @@ -60,6 +61,10 @@ func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } +func (c *StorageV1beta1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { + return newVolumeAttributesClasses(c) +} + // NewForConfig creates a new StorageV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 9b4ef231c..fed699cc8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. 
@@ -55,143 +52,18 @@ type StorageClassInterface interface { // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1beta1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.StorageClass, *v1beta1.StorageClassList, *storagev1beta1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.StorageClass { return &v1beta1.StorageClass{} }, + func() *v1beta1.StorageClassList { return &v1beta1.StorageClassList{} }), } } - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. 
-func (c *storageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1beta1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 35a8b64fc..01024ce48 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -20,17 +20,14 @@ package v1beta1 import ( "context" - json "encoding/json" - "fmt" - "time" v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -43,6 +40,7 @@ type VolumeAttachmentsGetter interface { type VolumeAttachmentInterface interface { Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error @@ -51,193 +49,25 @@ type VolumeAttachmentInterface interface { Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*v1beta1.VolumeAttachment, *v1beta1.VolumeAttachmentList, *storagev1beta1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.VolumeAttachment { return &v1beta1.VolumeAttachment{} }, + func() *v1beta1.VolumeAttachmentList { return &v1beta1.VolumeAttachmentList{} }), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. 
Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. 
-func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 000000000..47eadcac6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,69 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. +// A group's client should implement this interface. +type VolumeAttributesClassesGetter interface { + VolumeAttributesClasses() VolumeAttributesClassInterface +} + +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. 
+type VolumeAttributesClassInterface interface { + Create(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*v1beta1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *v1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1beta1.VolumeAttributesClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttributesClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttributesClass, err error) + VolumeAttributesClassExpansion +} + +// volumeAttributesClasses implements VolumeAttributesClassInterface +type volumeAttributesClasses struct { + *gentype.ClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration] +} + +// newVolumeAttributesClasses returns a VolumeAttributesClasses +func newVolumeAttributesClasses(c *StorageV1beta1Client) *volumeAttributesClasses { + return &volumeAttributesClasses{ + gentype.NewClientWithListAndApply[*v1beta1.VolumeAttributesClass, *v1beta1.VolumeAttributesClassList, *storagev1beta1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1beta1.VolumeAttributesClass { return &v1beta1.VolumeAttributesClass{} }, + func() *v1beta1.VolumeAttributesClassList { return &v1beta1.VolumeAttributesClassList{} }), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..89220c3ce --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type StorageVersionMigrationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go new file mode 100644 index 000000000..613e45355 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StoragemigrationV1alpha1Interface interface { + RESTClient() rest.Interface + StorageVersionMigrationsGetter +} + +// StoragemigrationV1alpha1Client is used to interact with features provided by the storagemigration.k8s.io group. +type StoragemigrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *StoragemigrationV1alpha1Client) StorageVersionMigrations() StorageVersionMigrationInterface { + return newStorageVersionMigrations(c) +} + +// NewForConfig creates a new StoragemigrationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*StoragemigrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new StoragemigrationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StoragemigrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &StoragemigrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new StoragemigrationV1alpha1Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *StoragemigrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StoragemigrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *StoragemigrationV1alpha1Client { + return &StoragemigrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StoragemigrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go new file mode 100644 index 000000000..5fc0fd519 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface. +// A group's client should implement this interface. +type StorageVersionMigrationsGetter interface { + StorageVersionMigrations() StorageVersionMigrationInterface +} + +// StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources. +type StorageVersionMigrationInterface interface { + Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error) + Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) + Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + StorageVersionMigrationExpansion +} + +// storageVersionMigrations implements StorageVersionMigrationInterface +type storageVersionMigrations struct { + *gentype.ClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration] +} + +// newStorageVersionMigrations returns a StorageVersionMigrations +func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations { + return &storageVersionMigrations{ + gentype.NewClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]( + "storageversionmigrations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} }, + func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} }), + } +} diff --git a/vendor/k8s.io/client-go/openapi/OWNERS b/vendor/k8s.io/client-go/openapi/OWNERS new file mode 100644 index 000000000..e61009424 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse diff --git a/vendor/k8s.io/client-go/openapi/client.go b/vendor/k8s.io/client-go/openapi/client.go new file mode 100644 index 000000000..6a4305718 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/client.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openapi + +import ( + "context" + "encoding/json" + "strings" + + "k8s.io/client-go/rest" + "k8s.io/kube-openapi/pkg/handler3" +) + +type Client interface { + Paths() (map[string]GroupVersion, error) +} + +type client struct { + // URL includes the `hash` query param to take advantage of cache busting + restClient rest.Interface +} + +func NewClient(restClient rest.Interface) Client { + return &client{ + restClient: restClient, + } +} + +func (c *client) Paths() (map[string]GroupVersion, error) { + data, err := c.restClient.Get(). + AbsPath("/openapi/v3"). + Do(context.TODO()). + Raw() + + if err != nil { + return nil, err + } + + discoMap := &handler3.OpenAPIV3Discovery{} + err = json.Unmarshal(data, discoMap) + if err != nil { + return nil, err + } + + // Create GroupVersions for each element of the result + result := map[string]GroupVersion{} + for k, v := range discoMap.Paths { + // If the server returned a URL rooted at /openapi/v3, preserve any additional client-side prefix. + // If the server returned a URL not rooted at /openapi/v3, treat it as an actual server-relative URL. + // See https://github.com/kubernetes/kubernetes/issues/117463 for details + useClientPrefix := strings.HasPrefix(v.ServerRelativeURL, "/openapi/v3") + result[k] = newGroupVersion(c, v, useClientPrefix) + } + return result, nil +} diff --git a/vendor/k8s.io/client-go/openapi/groupversion.go b/vendor/k8s.io/client-go/openapi/groupversion.go new file mode 100644 index 000000000..601dcbe3c --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/groupversion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "context" + "net/url" + + "k8s.io/kube-openapi/pkg/handler3" +) + +const ContentTypeOpenAPIV3PB = "application/com.github.proto-openapi.spec.v3@v1.0+protobuf" + +type GroupVersion interface { + Schema(contentType string) ([]byte, error) +} + +type groupversion struct { + client *client + item handler3.OpenAPIV3DiscoveryGroupVersion + useClientPrefix bool +} + +func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion, useClientPrefix bool) *groupversion { + return &groupversion{client: client, item: item, useClientPrefix: useClientPrefix} +} + +func (g *groupversion) Schema(contentType string) ([]byte, error) { + if !g.useClientPrefix { + return g.client.restClient.Get(). + RequestURI(g.item.ServerRelativeURL). + SetHeader("Accept", contentType). + Do(context.TODO()). + Raw() + } + + locator, err := url.Parse(g.item.ServerRelativeURL) + if err != nil { + return nil, err + } + + path := g.client.restClient.Get(). + AbsPath(locator.Path). + SetHeader("Accept", contentType) + + // Other than root endpoints(openapiv3/apis), resources have hash query parameter to support etags. 
+ // However, absPath does not support handling query parameters internally, + // so that hash query parameter is added manually + for k, value := range locator.Query() { + for _, v := range value { + path.Param(k, v) + } + } + + return path.Do(context.TODO()).Raw() +} diff --git a/vendor/k8s.io/client-go/openapi/typeconverter.go b/vendor/k8s.io/client-go/openapi/typeconverter.go new file mode 100644 index 000000000..4b91e66d4 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/typeconverter.go @@ -0,0 +1,48 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +func NewTypeConverter(client Client, preserveUnknownFields bool) (managedfields.TypeConverter, error) { + spec := map[string]*spec.Schema{} + paths, err := client.Paths() + if err != nil { + return nil, fmt.Errorf("failed to list paths: %w", err) + } + for _, gv := range paths { + s, err := gv.Schema("application/json") + if err != nil { + return nil, fmt.Errorf("failed to download schema: %w", err) + } + var openapi spec3.OpenAPI + if err := json.Unmarshal(s, &openapi); err != nil { + return nil, fmt.Errorf("failed to parse schema: %w", err) + } + for k, v := range openapi.Components.Schemas { + spec[k] = v + } + } + return managedfields.NewTypeConverter(spec, preserveUnknownFields) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS index e0ec62deb..4dfbb98ae 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -2,8 +2,7 @@ # approval on api packages bubbles to api-approvers reviewers: -- sig-auth-authenticators-approvers -- sig-auth-authenticators-reviewers + - sig-auth-authenticators-approvers + - sig-auth-authenticators-reviewers labels: -- sig/auth - + - sig/auth diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go index 9040bb9a4..ee5c338e3 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go @@ -23,7 +23,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/v1" - "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" ) @@ -32,5 +31,4 @@ func Install(scheme *runtime.Scheme) { utilruntime.Must(clientauthentication.AddToScheme(scheme)) utilruntime.Must(v1.AddToScheme(scheme)) utilruntime.Must(v1beta1.AddToScheme(scheme)) - utilruntime.Must(v1alpha1.AddToScheme(scheme)) } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go 
b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go index 8daaa3f8f..dae1bf1b0 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -41,11 +41,6 @@ type ExecCredential struct { // ExecCredentialSpec holds request and runtime specific information provided by // the transport. type ExecCredentialSpec struct { - // Response is populated when the transport encounters HTTP status codes, such as 401, - // suggesting previous credentials were invalid. - // +optional - Response *Response - // Interactive is true when the transport detects the command is being called from an // interactive prompt, i.e., when stdin has been passed to this exec plugin. // +optional @@ -75,15 +70,6 @@ type ExecCredentialStatus struct { ClientKeyData string `datapolicy:"secret-key"` } -// Response defines metadata about a failed request, including HTTP status code and -// response headers. -type Response struct { - // Headers holds HTTP headers returned by the server. - Header map[string][]string - // Code is the HTTP status code returned by the server. - Code int32 -} - // Cluster contains information to allow an exec plugin to communicate // with the kubernetes cluster being authenticated to. // @@ -112,6 +98,11 @@ type Cluster struct { // cluster. // +optional ProxyURL string + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. // diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go index 0c4754dd2..b9082e814 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go @@ -96,6 +96,11 @@ type Cluster struct { // cluster. // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. 
// diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go index 277d9d93e..9c3f63fca 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go @@ -62,6 +62,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { @@ -72,11 +77,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } return nil } @@ -86,6 +86,7 @@ func autoConvert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *cl out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { return err } @@ -103,6 +104,7 @@ func autoConvert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentica out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { return err } @@ -160,7 +162,6 @@ func Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in } func autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - // WARNING: in.Response requires manual conversion: does not exist in peer-type out.Interactive = in.Interactive if in.Cluster != nil { in, out := &in.Cluster, &out.Cluster @@ -174,6 +175,11 @@ func autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpe return nil } +// Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec is an autogenerated conversion function. 
+func Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in, out, s) +} + func autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go deleted file mode 100644 index 1ff13c438..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExecCredential is used by exec-based plugins to communicate credentials to -// HTTP transports. -type ExecCredential struct { - metav1.TypeMeta `json:",inline"` - - // Spec holds information passed to the plugin by the transport. This contains - // request and runtime specific information, such as if the session is interactive. - Spec ExecCredentialSpec `json:"spec,omitempty"` - - // Status is filled in by the plugin and holds the credentials that the transport - // should use to contact the API. - // +optional - Status *ExecCredentialStatus `json:"status,omitempty"` -} - -// ExecCredentialSpec holds request and runtime specific information provided by -// the transport. -type ExecCredentialSpec struct { - // Response is populated when the transport encounters HTTP status codes, such as 401, - // suggesting previous credentials were invalid. - // +optional - Response *Response `json:"response,omitempty"` - - // Interactive is true when the transport detects the command is being called from an - // interactive prompt. - // +optional - Interactive bool `json:"interactive,omitempty"` -} - -// ExecCredentialStatus holds credentials for the transport to use. -// -// Token and ClientKeyData are sensitive fields. This data should only be -// transmitted in-memory between client and exec plugin process. Exec plugin -// itself should at least be protected via file permissions. -type ExecCredentialStatus struct { - // ExpirationTimestamp indicates a time when the provided credentials expire. - // +optional - ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` - // Token is a bearer token used by the client for request authentication. - Token string `json:"token,omitempty" datapolicy:"token"` - // PEM-encoded client TLS certificates (including intermediates, if any). 
- ClientCertificateData string `json:"clientCertificateData,omitempty"` - // PEM-encoded private key for the above certificate. - ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"` -} - -// Response defines metadata about a failed request, including HTTP status code and -// response headers. -type Response struct { - // Header holds HTTP headers returned by the server. - Header map[string][]string `json:"header,omitempty"` - // Code is the HTTP status code returned by the server. - Code int32 `json:"code,omitempty"` -} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index fc59decef..000000000 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,173 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - unsafe "unsafe" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Response)(nil), (*clientauthentication.Response)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_Response_To_clientauthentication_Response(a.(*Response), b.(*clientauthentication.Response), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*clientauthentication.Response)(nil), (*Response)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_Response_To_v1alpha1_Response(a.(*clientauthentication.Response), b.(*Response), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { - if err := Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) - return nil -} - -// Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. 
-func Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { - return autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) -} - -func autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { - if err := Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) - return nil -} - -// Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential is an autogenerated conversion function. -func Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { - return autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in, out, s) -} - -func autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { - out.Response = (*clientauthentication.Response)(unsafe.Pointer(in.Response)) - out.Interactive = in.Interactive - return nil -} - -// Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. -func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) -} - -func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - out.Response = (*Response)(unsafe.Pointer(in.Response)) - out.Interactive = in.Interactive - // WARNING: in.Cluster requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { - out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) - out.Token = in.Token - out.ClientCertificateData = in.ClientCertificateData - out.ClientKeyData = in.ClientKeyData - return nil -} - -// Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. -func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) -} - -func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { - out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) - out.Token = in.Token - out.ClientCertificateData = in.ClientCertificateData - out.ClientKeyData = in.ClientKeyData - return nil -} - -// Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus is an autogenerated conversion function. 
-func Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { - return autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in, out, s) -} - -func autoConvert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { - out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) - out.Code = in.Code - return nil -} - -// Convert_v1alpha1_Response_To_clientauthentication_Response is an autogenerated conversion function. -func Convert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { - return autoConvert_v1alpha1_Response_To_clientauthentication_Response(in, out, s) -} - -func autoConvert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { - out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) - out.Code = in.Code - return nil -} - -// Convert_clientauthentication_Response_To_v1alpha1_Response is an autogenerated conversion function. -func Convert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { - return autoConvert_clientauthentication_Response_To_v1alpha1_Response(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go index 714b0273a..918eb11e1 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -96,6 +96,11 @@ type Cluster struct { // cluster. // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Config holds additional config data that is specific to the exec // plugin with regards to the cluster being authenticated to. 
// diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go index c82993897..5372da151 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -62,6 +62,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) }); err != nil { @@ -72,11 +77,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) - }); err != nil { - return err - } return nil } @@ -86,6 +86,7 @@ func autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, ou out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil { return err } @@ -103,6 +104,7 @@ func autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthe out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil { return err } @@ -160,7 +162,6 @@ func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSp } func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - // WARNING: in.Response requires manual conversion: does not exist in peer-type out.Interactive = in.Interactive if in.Cluster != nil { in, out := &in.Cluster, &out.Cluster @@ -174,6 +175,11 @@ func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredenti return nil } +// Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec is an autogenerated conversion function. 
+func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s) +} + func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go index 3103629f6..244d54ce3 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -83,11 +83,6 @@ func (in *ExecCredential) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { *out = *in - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = new(Response) - (*in).DeepCopyInto(*out) - } if in.Cluster != nil { in, out := &in.Cluster, &out.Cluster *out = new(Cluster) @@ -125,34 +120,3 @@ func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Response) DeepCopyInto(out *Response) { - *out = *in - if in.Header != nil { - in, out := &in.Header, &out.Header - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. 
-func (in *Response) DeepCopy() *Response { - if in == nil { - return nil - } - out := new(Response) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 51e34dda3..676d51d32 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -43,7 +43,8 @@ var ( gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see - // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md + // https://github.com/kubernetes/sig-release/blob/master/release-engineering/versioning.md#kubernetes-release-versioning + // https://kubernetes.io/releases/version-skew-policy/ // for a detailed discussion of this field) // // TODO: This field is still called "gitVersion" for legacy diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 66649a060..b471f5cc6 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -32,17 +32,16 @@ import ( "sync" "time" - "github.com/davecgh/go-spew/spew" "golang.org/x/term" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/dump" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/install" clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1" - clientauthenticationv1alpha1 "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/metrics" @@ -73,9 +72,8 @@ var ( globalCache = newCache() // The list of API versions we accept. apiVersions = map[string]schema.GroupVersion{ - clientauthenticationv1alpha1.SchemeGroupVersion.String(): clientauthenticationv1alpha1.SchemeGroupVersion, - clientauthenticationv1beta1.SchemeGroupVersion.String(): clientauthenticationv1beta1.SchemeGroupVersion, - clientauthenticationv1.SchemeGroupVersion.String(): clientauthenticationv1.SchemeGroupVersion, + clientauthenticationv1beta1.SchemeGroupVersion.String(): clientauthenticationv1beta1.SchemeGroupVersion, + clientauthenticationv1.SchemeGroupVersion.String(): clientauthenticationv1.SchemeGroupVersion, } ) @@ -83,8 +81,6 @@ func newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } -var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} - func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string { key := struct { conf *api.ExecConfig @@ -93,7 +89,7 @@ func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) strin conf: conf, cluster: cluster, } - return spewConfig.Sprint(key) + return dump.Pretty(key) } type cache struct { @@ -298,8 +294,8 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { // also configured to allow client certificates for authentication. For requests // like "kubectl get --token (token) pods" we should assume the intention is to // use the provided token for authentication. The same can be said for when the - // user specifies basic auth. - if c.HasTokenAuth() || c.HasBasicAuth() { + // user specifies basic auth or cert auth. 
+ if c.HasTokenAuth() || c.HasBasicAuth() || c.HasCertAuth() { return nil } @@ -307,20 +303,21 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { return &roundTripper{a, rt} }) - if c.TLS.GetCert != nil { + if c.HasCertCallback() { return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") } - c.TLS.GetCert = a.getCert.GetCert c.TLS.GetCertHolder = a.getCert // comparable for TLS config caching - if c.Dial != nil { + if c.DialHolder != nil { + if c.DialHolder.Dial == nil { + return errors.New("invalid transport.Config.DialHolder: wrapped Dial function is nil") + } + // if c has a custom dialer, we have to wrap it // TLS config caching is not supported for this config - d := connrotation.NewDialerWithTracker(c.Dial, a.connTracker) - c.Dial = d.DialContext - c.DialHolder = nil + d := connrotation.NewDialerWithTracker(c.DialHolder.Dial, a.connTracker) + c.DialHolder = &transport.DialHolder{Dial: d.DialContext} } else { - c.Dial = a.dial.Dial c.DialHolder = a.dial // comparable for TLS config caching } @@ -358,11 +355,7 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return nil, err } if res.StatusCode == http.StatusUnauthorized { - resp := &clientauthentication.Response{ - Header: res.Header, - Code: int32(res.StatusCode), - } - if err := r.a.maybeRefreshCreds(creds, resp); err != nil { + if err := r.a.maybeRefreshCreds(creds); err != nil { klog.Errorf("refreshing credentials: %v", err) } } @@ -392,7 +385,7 @@ func (a *Authenticator) getCreds() (*credentials, error) { return a.cachedCreds, nil } - if err := a.refreshCredsLocked(nil); err != nil { + if err := a.refreshCredsLocked(); err != nil { return nil, err } @@ -401,7 +394,7 @@ func (a *Authenticator) getCreds() (*credentials, error) { // maybeRefreshCreds executes the plugin to force a rotation of the // credentials, unless they were rotated already. -func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentication.Response) error { +func (a *Authenticator) maybeRefreshCreds(creds *credentials) error { a.mu.Lock() defer a.mu.Unlock() @@ -412,12 +405,12 @@ func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentic return nil } - return a.refreshCredsLocked(r) + return a.refreshCredsLocked() } // refreshCredsLocked executes the plugin and reads the credentials from // stdout. It must be called while holding the Authenticator's mutex. 
-func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error { +func (a *Authenticator) refreshCredsLocked() error { interactive, err := a.interactiveFunc() if err != nil { return fmt.Errorf("exec plugin cannot support interactive mode: %w", err) @@ -425,7 +418,6 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err cred := &clientauthentication.ExecCredential{ Spec: clientauthentication.ExecCredentialSpec{ - Response: r, Interactive: interactive, }, } diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS index 597484b4a..7b23294c4 100644 --- a/vendor/k8s.io/client-go/rest/OWNERS +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -1,22 +1,14 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- thockin -- smarterclayton -- caesarxuchao -- wojtek-t -- deads2k -- brendandburns -- liggitt -- sttts -- luxas -- dims -- errordeveloper -- hongchaodeng -- krousey -- resouer -- cjcullen -- rmmh -- asalkeld -- juanvallejo -- lojies + - thockin + - smarterclayton + - caesarxuchao + - wojtek-t + - deads2k + - liggitt + - sttts + - luxas + - dims + - cjcullen + - lojies diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go index 2cf821bcd..60df7e568 100644 --- a/vendor/k8s.io/client-go/rest/client.go +++ b/vendor/k8s.io/client-go/rest/client.go @@ -52,8 +52,7 @@ type Interface interface { // ClientContentConfig controls how RESTClient communicates with the server. // // TODO: ContentConfig will be updated to accept a Negotiator instead of a -// -// NegotiatedSerializer and NegotiatedSerializer will be removed. +// NegotiatedSerializer and NegotiatedSerializer will be removed. type ClientContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 4909dc53a..f8ff7e928 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "io/ioutil" "net" "net/http" "net/url" @@ -317,7 +316,7 @@ func RESTClientFor(config *Config) (*RESTClient, error) { // Validate config.Host before constructing the transport/client so we can fail fast. // ServerURL will be obtained later in RESTClientForConfigAndClient() - _, _, err := defaultServerUrlFor(config) + _, _, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -344,7 +343,7 @@ func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RES return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + baseURL, versionedAPIPath, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -391,7 +390,7 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { // Validate config.Host before constructing the transport/client so we can fail fast. 
// ServerURL will be obtained later in UnversionedRESTClientForConfigAndClient() - _, _, err := defaultServerUrlFor(config) + _, _, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -411,7 +410,7 @@ func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Cl return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") } - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + baseURL, versionedAPIPath, err := DefaultServerUrlFor(config) if err != nil { return nil, err } @@ -519,7 +518,7 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - token, err := ioutil.ReadFile(tokenFile) + token, err := os.ReadFile(tokenFile) if err != nil { return nil, err } @@ -549,7 +548,7 @@ func InClusterConfig() (*Config, error) { // Note: the Insecure flag is ignored when testing for this value, so MITM attacks are // still possible. func IsConfigTransportTLS(config Config) bool { - baseURL, _, err := defaultServerUrlFor(&config) + baseURL, _, err := DefaultServerUrlFor(&config) if err != nil { return false } @@ -572,10 +571,7 @@ func LoadTLSFiles(c *Config) error { } c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) - if err != nil { - return err - } - return nil + return err } // dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, @@ -585,7 +581,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { return data, nil } if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) + fileData, err := os.ReadFile(file) if err != nil { return []byte{}, err } diff --git a/vendor/k8s.io/client-go/rest/exec.go b/vendor/k8s.io/client-go/rest/exec.go index 5f3b43c55..96f5dba35 100644 --- a/vendor/k8s.io/client-go/rest/exec.go +++ b/vendor/k8s.io/client-go/rest/exec.go @@ -21,7 +21,6 @@ import ( "net/http" "net/url" - "k8s.io/client-go/pkg/apis/clientauthentication" clientauthenticationapi "k8s.io/client-go/pkg/apis/clientauthentication" ) @@ -50,12 +49,13 @@ func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, erro } } - return &clientauthentication.Cluster{ + return &clientauthenticationapi.Cluster{ Server: config.Host, TLSServerName: config.ServerName, InsecureSkipTLSVerify: config.Insecure, CertificateAuthorityData: caData, ProxyURL: proxyURL, + DisableCompression: config.DisableCompression, Config: config.ExecProvider.Config, }, nil } @@ -63,7 +63,7 @@ func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, erro // ExecClusterToConfig creates a Config with the corresponding fields from the provided // clientauthenticationapi.Cluster. The returned Config will be anonymous (i.e., it will not have // any authentication-related fields set). 
-func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) { +func ExecClusterToConfig(cluster *clientauthenticationapi.Cluster) (*Config, error) { var proxy func(*http.Request) (*url.URL, error) if cluster.ProxyURL != "" { proxyURL, err := url.Parse(cluster.ProxyURL) @@ -80,6 +80,7 @@ func ExecClusterToConfig(cluster *clientauthentication.Cluster) (*Config, error) ServerName: cluster.TLSServerName, CAData: cluster.CertificateAuthorityData, }, - Proxy: proxy, + Proxy: proxy, + DisableCompression: cluster.DisableCompression, }, nil } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 5ecb43b77..f5a9f68ca 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -22,10 +22,11 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "mime" "net/http" + "net/http/httptrace" "net/url" + "os" "path" "reflect" "strconv" @@ -36,12 +37,15 @@ import ( "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" @@ -441,7 +445,7 @@ func (r *Request) Body(obj interface{}) *Request { } switch t := obj.(type) { case string: - data, err := ioutil.ReadFile(t) + data, err := os.ReadFile(t) if err != nil { r.err = err return r @@ -481,7 +485,13 @@ func (r *Request) Body(obj interface{}) *Request { return r } -// URL returns the current working URL. +// Error returns any error encountered constructing the request, if any. +func (r *Request) Error() error { + return r.err +} + +// URL returns the current working URL. Check the result of Error() to ensure +// that the returned URL is valid. 
func (r *Request) URL() *url.URL { p := r.pathPrefix if r.namespaceSet && len(r.namespace) > 0 { @@ -527,14 +537,17 @@ func (r Request) finalURLTemplate() url.URL { newParams[k] = v } r.params = newParams - url := r.URL() + u := r.URL() + if u == nil { + return url.URL{} + } - segments := strings.Split(url.Path, "/") + segments := strings.Split(u.Path, "/") groupIndex := 0 index := 0 trimmedBasePath := "" - if url != nil && r.c.base != nil && strings.Contains(url.Path, r.c.base.Path) { - p := strings.TrimPrefix(url.Path, r.c.base.Path) + if r.c.base != nil && strings.Contains(u.Path, r.c.base.Path) { + p := strings.TrimPrefix(u.Path, r.c.base.Path) if !strings.HasPrefix(p, "/") { p = "/" + p } @@ -545,7 +558,7 @@ func (r Request) finalURLTemplate() url.URL { groupIndex = 1 } if len(segments) <= 2 { - return *url + return *u } const CoreGroupPrefix = "api" @@ -563,11 +576,11 @@ func (r Request) finalURLTemplate() url.URL { // outlet here in case more API groups are added in future if ever possible: // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups // if a wrong API groups name is encountered, return the {prefix} for url.Path - url.Path = "/{prefix}" - url.RawQuery = "" - return *url + u.Path = "/{prefix}" + u.RawQuery = "" + return *u } - //switch segLength := len(segments) - index; segLength { + // switch segLength := len(segments) - index; segLength { switch { // case len(segments) - index == 1: // resource (with no name) do nothing @@ -590,8 +603,8 @@ func (r Request) finalURLTemplate() url.URL { segments[index+3] = "{name}" } } - url.Path = path.Join(trimmedBasePath, path.Join(segments...)) - return *url + u.Path = path.Join(trimmedBasePath, path.Join(segments...)) + return *u } func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) error { @@ -602,7 +615,9 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err now := time.Now() err := r.rateLimiter.Wait(ctx) - + if err != nil { + err = fmt.Errorf("client rate limiter Wait returned an error: %w", err) + } latency := time.Since(now) var message string @@ -708,36 +723,20 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { } return false } - - var retryAfter *RetryAfter + retry := r.retryFn(r.maxRetries) url := r.URL().String() - withRetry := r.retryFn(r.maxRetries) for { + if err := retry.Before(ctx, r); err != nil { + return nil, retry.WrapPreviousError(err) + } + req, err := r.newHTTPRequest(ctx) if err != nil { return nil, err } - r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) - if retryAfter != nil { - // We are retrying the request that we already send to apiserver - // at least once before. - // This request should also be throttled with the client-internal rate limiter. 
- if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil { - return nil, err - } - retryAfter = nil - } - resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) - if r.c.base != nil { - if err != nil { - r.backoff.UpdateBackoff(r.c.base, err, 0) - } else { - r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode) - } - } + retry.After(ctx, r, resp, err) if err == nil && resp.StatusCode == http.StatusOK { return r.newStreamWatcher(resp) } @@ -745,14 +744,8 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { done, transformErr := func() (bool, error) { defer readAndCloseResponseBody(resp) - var retry bool - retryAfter, retry = withRetry.NextRetry(r.body, req, resp, err, isErrRetryableFunc) - if retry { - err := withRetry.BeforeNextRetry(ctx, r.backoff, retryAfter, url) - if err == nil { - return false, nil - } - klog.V(4).Infof("Could not retry request - %v", err) + if retry.IsNextRetry(ctx, r, req, resp, err, isErrRetryableFunc) { + return false, nil } if resp == nil { @@ -773,7 +766,143 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { // we need to return the error object from that. err = transformErr } - return nil, err + return nil, retry.WrapPreviousError(err) + } + } +} + +type WatchListResult struct { + // err holds any errors we might have received + // during streaming. + err error + + // items hold the collected data + items []runtime.Object + + // initialEventsEndBookmarkRV holds the resource version + // extracted from the bookmark event that marks + // the end of the stream. + initialEventsEndBookmarkRV string + + // gv represents the API version + // it is used to construct the final list response + // normally this information is filled by the server + gv schema.GroupVersion +} + +func (r WatchListResult) Into(obj runtime.Object) error { + if r.err != nil { + return r.err + } + + listPtr, err := meta.GetItemsPtr(obj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + + if len(r.items) == 0 { + listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0)) + } else { + listVal.Set(reflect.MakeSlice(listVal.Type(), len(r.items), len(r.items))) + for i, o := range r.items { + if listVal.Type().Elem() != reflect.TypeOf(o).Elem() { + return fmt.Errorf("received object type = %v at index = %d, doesn't match the list item type = %v", reflect.TypeOf(o).Elem(), i, listVal.Type().Elem()) + } + listVal.Index(i).Set(reflect.ValueOf(o).Elem()) + } + } + + listMeta, err := meta.ListAccessor(obj) + if err != nil { + return err + } + listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV) + + typeMeta, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + version := r.gv.String() + typeMeta.SetAPIVersion(version) + typeMeta.SetKind(reflect.TypeOf(obj).Elem().Name()) + + return nil +} + +// WatchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// Note that the watchlist requires properly setting the ListOptions +// otherwise it just establishes a regular watch with the server. +// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists +// to see what parameters are currently required. 
+func (r *Request) WatchList(ctx context.Context) WatchListResult { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)} + } + // TODO(#115478): consider validating request parameters (i.e sendInitialEvents). + // Most users use the generated client, which handles the proper setting of parameters. + // We don't have validation for other methods (e.g., the Watch) + // thus, for symmetry, we haven't added additional checks for the WatchList method. + w, err := r.Watch(ctx) + if err != nil { + return WatchListResult{err: err} + } + return r.handleWatchList(ctx, w) +} + +// handleWatchList holds the actual logic for easier unit testing. +// Note that this function will close the passed watch. +func (r *Request) handleWatchList(ctx context.Context, w watch.Interface) WatchListResult { + defer w.Stop() + var lastKey string + var items []runtime.Object + + for { + select { + case <-ctx.Done(): + return WatchListResult{err: ctx.Err()} + case event, ok := <-w.ResultChan(): + if !ok { + return WatchListResult{err: fmt.Errorf("unexpected watch close")} + } + if event.Type == watch.Error { + return WatchListResult{err: errors.FromObject(event.Object)} + } + meta, err := meta.Accessor(event.Object) + if err != nil { + return WatchListResult{err: fmt.Errorf("failed to parse watch event: %#v", event)} + } + + switch event.Type { + case watch.Added: + // the following check ensures that the response is ordered. + // earlier servers had a bug that caused them to not sort the output. + // in such cases, return an error which can trigger fallback logic. + key := objectKeyFromMeta(meta) + if len(lastKey) > 0 && lastKey > key { + return WatchListResult{err: fmt.Errorf("cannot add the obj (%#v) with the key = %s, as it violates the ordering guarantees provided by the watchlist feature in beta phase, lastInsertedKey was = %s", event.Object, key, lastKey)} + } + items = append(items, event.Object) + lastKey = key + case watch.Bookmark: + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + return WatchListResult{ + items: items, + initialEventsEndBookmarkRV: meta.GetResourceVersion(), + gv: r.c.content.GroupVersion, + } + } + default: + return WatchListResult{err: fmt.Errorf("unexpected watch event %#v, expected to only receive watch.Added and watch.Bookmark events", event)} + } } } } @@ -802,22 +931,36 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) ), nil } -// updateURLMetrics is a convenience function for pushing metrics. -// It also handles corner cases for incomplete/invalid request data. -func updateURLMetrics(ctx context.Context, req *Request, resp *http.Response, err error) { - url := "none" +// updateRequestResultMetric increments the RequestResult metric counter, +// it should be called with the (response, err) tuple from the final +// reply from the server. +func updateRequestResultMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestResult.Increment(ctx, code, req.verb, host) +} + +// updateRequestRetryMetric increments the RequestRetry metric counter, +// it should be called with the (response, err) tuple for each retry +// except for the final attempt. 
+func updateRequestRetryMetric(ctx context.Context, req *Request, resp *http.Response, err error) { + code, host := sanitize(req, resp, err) + metrics.RequestRetry.IncrementRetry(ctx, code, req.verb, host) +} + +func sanitize(req *Request, resp *http.Response, err error) (string, string) { + host := "none" if req.c.base != nil { - url = req.c.base.Host + host = req.c.base.Host } // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric // system so we just report them as ``. - if err != nil { - metrics.RequestResult.Increment(ctx, "", req.verb, url) - } else { - //Metrics for failure codes - metrics.RequestResult.Increment(ctx, strconv.Itoa(resp.StatusCode), req.verb, url) + code := "" + if resp != nil { + code = strconv.Itoa(resp.StatusCode) } + + return code, host } // Stream formats and executes the request, and offers streaming of the response. @@ -838,35 +981,19 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { client = http.DefaultClient } - var retryAfter *RetryAfter - withRetry := r.retryFn(r.maxRetries) + retry := r.retryFn(r.maxRetries) url := r.URL().String() for { - req, err := r.newHTTPRequest(ctx) - if err != nil { + if err := retry.Before(ctx, r); err != nil { return nil, err } - r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) - if retryAfter != nil { - // We are retrying the request that we already send to apiserver - // at least once before. - // This request should also be throttled with the client-internal rate limiter. - if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil { - return nil, err - } - retryAfter = nil + req, err := r.newHTTPRequest(ctx) + if err != nil { + return nil, err } - resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) - if r.c.base != nil { - if err != nil { - r.backoff.UpdateBackoff(r.URL(), err, 0) - } else { - r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) - } - } + retry.After(ctx, r, resp, err) if err != nil { // we only retry on an HTTP response with 'Retry-After' header return nil, err @@ -881,14 +1008,8 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { done, transformErr := func() (bool, error) { defer resp.Body.Close() - var retry bool - retryAfter, retry = withRetry.NextRetry(r.body, req, resp, err, neverRetryError) - if retry { - err := withRetry.BeforeNextRetry(ctx, r.backoff, retryAfter, url) - if err == nil { - return false, nil - } - klog.V(4).Infof("Could not retry request - %v", err) + if retry.IsNextRetry(ctx, r, req, resp, err, neverRetryError) { + return false, nil } result := r.transformResponse(resp, req) if err := result.Error(); err != nil { @@ -944,21 +1065,44 @@ func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) { } url := r.URL().String() - req, err := http.NewRequest(r.verb, url, body) + req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, newDNSMetricsTrace(ctx)), r.verb, url, body) if err != nil { return nil, err } - req = req.WithContext(ctx) req.Header = r.headers return req, nil } +// newDNSMetricsTrace returns an HTTP trace that tracks time spent on DNS lookups per host. +// This metric is available in client as "rest_client_dns_resolution_duration_seconds". 
+func newDNSMetricsTrace(ctx context.Context) *httptrace.ClientTrace { + type dnsMetric struct { + start time.Time + host string + sync.Mutex + } + dns := &dnsMetric{} + return &httptrace.ClientTrace{ + DNSStart: func(info httptrace.DNSStartInfo) { + dns.Lock() + defer dns.Unlock() + dns.start = time.Now() + dns.host = info.Host + }, + DNSDone: func(info httptrace.DNSDoneInfo) { + dns.Lock() + defer dns.Unlock() + metrics.ResolverLatency.Observe(ctx, dns.host, time.Since(dns.start)) + }, + } +} + // request connects to the server and invokes the provided function when a server response is // received. It handles retry behavior and up front validation of requests. It will invoke // fn at most once. It will return an error if a problem occurred prior to connecting to the // server - the provided function is responsible for handling server errors. func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Response)) error { - //Metrics for total request latency + // Metrics for total request latency start := time.Now() defer func() { metrics.RequestLatency.Observe(ctx, r.verb, r.finalURLTemplate(), time.Since(start)) @@ -991,37 +1135,42 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp defer cancel() } + isErrRetryableFunc := func(req *http.Request, err error) bool { + // "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors. + // Thus in case of "GET" operations, we simply retry it. + // We are not automatically retrying "write" operations, as they are not idempotent. + if req.Method != "GET" { + return false + } + // For connection errors and apiserver shutdown errors retry. + if net.IsConnectionReset(err) || net.IsProbableEOF(err) { + return true + } + return false + } + // Right now we make about ten retry attempts if we get a Retry-After response. - var retryAfter *RetryAfter - withRetry := r.retryFn(r.maxRetries) + retry := r.retryFn(r.maxRetries) for { + if err := retry.Before(ctx, r); err != nil { + return retry.WrapPreviousError(err) + } req, err := r.newHTTPRequest(ctx) if err != nil { return err } - - r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL())) - if retryAfter != nil { - // We are retrying the request that we already send to apiserver - // at least once before. - // This request should also be throttled with the client-internal rate limiter. - if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil { - return err - } - retryAfter = nil - } resp, err := client.Do(req) - updateURLMetrics(ctx, r, resp, err) - if err != nil { - r.backoff.UpdateBackoff(r.URL(), err, 0) - } else { - r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode) + // The value -1 or a value of 0 with a non-nil Body indicates that the length is unknown. + // https://pkg.go.dev/net/http#Request + if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) { + metrics.RequestSize.Observe(ctx, r.verb, r.URL().Host, float64(req.ContentLength)) } + retry.After(ctx, r, resp, err) done := func() bool { defer readAndCloseResponseBody(resp) - // if the the server returns an error in err, the response will be nil. + // if the server returns an error in err, the response will be nil. 
f := func(req *http.Request, resp *http.Response) { if resp == nil { return @@ -1029,33 +1178,15 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp fn(req, resp) } - var retry bool - retryAfter, retry = withRetry.NextRetry(r.body, req, resp, err, func(req *http.Request, err error) bool { - // "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors. - // Thus in case of "GET" operations, we simply retry it. - // We are not automatically retrying "write" operations, as they are not idempotent. - if r.verb != "GET" { - return false - } - // For connection errors and apiserver shutdown errors retry. - if net.IsConnectionReset(err) || net.IsProbableEOF(err) { - return true - } + if retry.IsNextRetry(ctx, r, req, resp, err, isErrRetryableFunc) { return false - }) - if retry { - err := withRetry.BeforeNextRetry(ctx, r.backoff, retryAfter, req.URL.String()) - if err == nil { - return false - } - klog.V(4).Infof("Could not retry request - %v", err) } f(req, resp) return true }() if done { - return err + return retry.WrapPreviousError(err) } } } @@ -1074,6 +1205,9 @@ func (r *Request) Do(ctx context.Context) Result { if err != nil { return Result{err: err} } + if result.err == nil || len(result.body) > 0 { + metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body))) + } return result } @@ -1081,7 +1215,7 @@ func (r *Request) Do(ctx context.Context) Result { func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { - result.body, result.err = ioutil.ReadAll(resp.Body) + result.body, result.err = io.ReadAll(resp.Body) glogBody("Response Body", result.body) if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { result.err = r.transformUnstructuredResponseError(resp, req, result.body) @@ -1090,6 +1224,9 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { if err != nil { return nil, err } + if result.err == nil || len(result.body) > 0 { + metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body))) + } return result.body, result.err } @@ -1097,7 +1234,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { var body []byte if resp.Body != nil { - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) switch err.(type) { case nil: body = data @@ -1205,13 +1342,13 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. func glogBody(prefix string, body []byte) { - if klog.V(8).Enabled() { + if klogV := klog.V(8); klogV.Enabled() { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + klogV.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - klog.Infof("%s: %s", prefix, truncateBody(string(body))) + klogV.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -1239,7 +1376,7 @@ const maxUnstructuredResponseTextBytes = 2048 // TODO: introduce transformation of generic http.Client.Do() errors that separates 4. 
func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { if body == nil && resp.Body != nil { - if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil { + if data, err := io.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil { body = data } } @@ -1348,6 +1485,14 @@ func (r Result) StatusCode(statusCode *int) Result { return r } +// ContentType returns the "Content-Type" response header into the passed +// string, returning the Result for possible chaining. (Only valid if no +// error code was returned.) +func (r Result) ContentType(contentType *string) Result { + *contentType = r.contentType + return r +} + // Into stores the result into obj, if possible. If obj is nil it is ignored. // If the returned object is of type Status and has .Status != StatusSuccess, the // additional information in Status will be used to enrich the error. @@ -1464,3 +1609,10 @@ func ValidatePathSegmentName(name string, prefix bool) []string { } return IsValidPathSegmentName(name) } + +func objectKeyFromMeta(objMeta metav1.Object) string { + if len(objMeta.GetNamespace()) > 0 { + return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName()) + } + return objMeta.GetName() +} diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 7c38c6d92..53f986cbf 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -108,10 +108,13 @@ func (c *Config) TransportConfig() (*transport.Config, error) { Groups: c.Impersonate.Groups, Extra: c.Impersonate.Extra, }, - Dial: c.Dial, Proxy: c.Proxy, } + if c.Dial != nil { + conf.DialHolder = &transport.DialHolder{Dial: c.Dial} + } + if c.ExecProvider != nil && c.AuthProvider != nil { return nil, errors.New("execProvider and authProvider cannot be used in combination") } diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go index a56d1838d..c4ce6e3b8 100644 --- a/vendor/k8s.io/client-go/rest/url_utils.go +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -77,9 +77,9 @@ func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) s return versionedAPIPath } -// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// DefaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It // requires Host and Version to be set prior to being called. -func defaultServerUrlFor(config *Config) (*url.URL, string, error) { +func DefaultServerUrlFor(config *Config) (*url.URL, string, error) { // TODO: move the default to secure when the apiserver supports TLS by default // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." 
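// Since defaultServerUrlFor is now exported as DefaultServerUrlFor, callers outside
// the rest package can compute the base URL and versioned API path for a Config.
// A small sketch; the host and API path below are placeholder values:
package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/rest"
)

func main() {
	cfg := &rest.Config{
		Host:    "https://10.0.0.1:6443", // placeholder apiserver address
		APIPath: "/api",
	}
	baseURL, versionedAPIPath, err := rest.DefaultServerUrlFor(cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(baseURL.String(), versionedAPIPath)
}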
hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go index e95c020b2..9e1e04d14 100644 --- a/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -51,7 +51,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, err } if res != &got { - return "", nil, fmt.Errorf("unable to decode to metav1.Event") + return "", nil, fmt.Errorf("unable to decode to metav1.WatchEvent") } switch got.Type { case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go index d5098473e..eaaadc6a4 100644 --- a/vendor/k8s.io/client-go/rest/with_retry.go +++ b/vendor/k8s.io/client-go/rest/with_retry.go @@ -20,8 +20,8 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" + "net/url" "time" "k8s.io/klog/v2" @@ -51,40 +51,52 @@ var neverRetryError = IsRetryableErrorFunc(func(_ *http.Request, _ error) bool { // Note that WithRetry is not safe for concurrent use by multiple // goroutines without additional locking or coordination. type WithRetry interface { - // SetMaxRetries makes the request use the specified integer as a ceiling - // for retries upon receiving a 429 status code and the "Retry-After" header - // in the response. - // A zero maxRetries should prevent from doing any retry and return immediately. - SetMaxRetries(maxRetries int) - - // NextRetry advances the retry counter appropriately and returns true if the - // request should be retried, otherwise it returns false if: + // IsNextRetry advances the retry counter appropriately + // and returns true if the request should be retried, + // otherwise it returns false, if: // - we have already reached the maximum retry threshold. // - the error does not fall into the retryable category. // - the server has not sent us a 429, or 5xx status code and the // 'Retry-After' response header is not set with a value. + // - we need to seek to the beginning of the request body before we + // initiate the next retry, the function should log an error and + // return false if it fails to do so. // - // if retry is set to true, retryAfter will contain the information - // regarding the next retry. - // - // bodyReader: the custom io.Reader body used for the requests. if non-nil, the request cannot be retried. - // request: the original request sent to the server + // restReq: the associated rest.Request + // httpReq: the HTTP Request sent to the server // resp: the response sent from the server, it is set if err is nil // err: the server sent this error to us, if err is set then resp is nil. // f: a IsRetryableErrorFunc function provided by the client that determines // if the err sent by the server is retryable. - NextRetry(bodyReader io.Reader, req *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) (*RetryAfter, bool) - - // BeforeNextRetry is responsible for carrying out operations that need - // to be completed before the next retry is initiated: - // - if the request context is already canceled there is no need to - // retry, the function will return ctx.Err(). - // - we should wait the number of seconds the server has asked us to - // in the 'Retry-After' response header. 
+ IsNextRetry(ctx context.Context, restReq *Request, httpReq *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) bool + + // Before should be invoked prior to each attempt, including + // the first one. If an error is returned, the request should + // be aborted immediately. // - // If BeforeNextRetry returns an error the client should abort the retry, - // otherwise it is safe to initiate the next retry. - BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string) error + // Before may also be additionally responsible for preparing + // the request for the next retry, namely in terms of resetting + // the request body in case it has been read. + Before(ctx context.Context, r *Request) error + + // After should be invoked immediately after an attempt is made. + After(ctx context.Context, r *Request, resp *http.Response, err error) + + // WrapPreviousError wraps the error from any previous attempt into + // the final error specified in 'finalErr', so the user has more + // context why the request failed. + // For example, if a request times out after multiple retries then + // we see a generic context.Canceled or context.DeadlineExceeded + // error which is not very useful in debugging. This function can + // wrap any error from previous attempt(s) to provide more context to + // the user. The error returned in 'err' must satisfy the + // following conditions: + // a: errors.Unwrap(err) = errors.Unwrap(finalErr) if finalErr + // implements Unwrap + // b: errors.Unwrap(err) = finalErr if finalErr does not + // implements Unwrap + // c: errors.Is(err, otherErr) = errors.Is(finalErr, otherErr) + WrapPreviousError(finalErr error) (err error) } // RetryAfter holds information associated with the next retry. @@ -105,42 +117,63 @@ type RetryAfter struct { type withRetry struct { maxRetries int attempts int + + // retry after parameters that pertain to the attempt that is to + // be made soon, so as to enable 'Before' and 'After' to refer + // to the retry parameters. + // - for the first attempt, it will always be nil + // - for consecutive attempts, it is non nil and holds the + // retry after parameters for the next attempt to be made. + retryAfter *RetryAfter + + // we keep track of two most recent errors, if the most + // recent attempt is labeled as 'N' then: + // - currentErr represents the error returned by attempt N, it + // can be nil if attempt N did not return an error. + // - previousErr represents an error from an attempt 'M' which + // precedes attempt 'N' (N - M >= 1), it is non nil only when: + // - for a sequence of attempt(s) 1..n (n>1), there + // is an attempt k (k r.maxRetries { - return retryAfter, false + return false } // if the server returned an error, it takes precedence over the http response. var errIsRetryable bool - if f != nil && err != nil && f.IsErrorRetryable(req, err) { + if f != nil && err != nil && f.IsErrorRetryable(httpReq, err) { errIsRetryable = true // we have a retryable error, for which we will create an // artificial "Retry-After" response. 
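// IsNextRetry defers error classification to a caller-supplied IsRetryableErrorFunc.
// A minimal sketch of such a predicate, mirroring the GET-only policy used by
// Request.request above; the apimachinery and client-go import paths are assumed
// to be the usual ones:
package main

import (
	"fmt"
	"io"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/client-go/rest"
)

// retryIdempotentOnly retries only GET requests, and only for errors that look
// like a dropped connection or an apiserver that is shutting down.
var retryIdempotentOnly rest.IsRetryableErrorFunc = func(req *http.Request, err error) bool {
	if req.Method != http.MethodGet {
		return false
	}
	return utilnet.IsConnectionReset(err) || utilnet.IsProbableEOF(err)
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	fmt.Println(retryIdempotentOnly.IsErrorRetryable(req, io.EOF)) // true
}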
resp = retryAfterResponse() } if err != nil && !errIsRetryable { - return retryAfter, false + return false } // if we are here, we have either a or b: @@ -150,28 +183,127 @@ func (r *withRetry) NextRetry(bodyReader io.Reader, req *http.Request, resp *htt // need to check if it is retryable seconds, wait := checkWait(resp) if !wait { - return retryAfter, false + return false } - retryAfter.Wait = time.Duration(seconds) * time.Second - retryAfter.Reason = getRetryReason(r.attempts, seconds, resp, err) - return retryAfter, true + r.retryAfter.Wait = time.Duration(seconds) * time.Second + r.retryAfter.Reason = getRetryReason(r.attempts, seconds, resp, err) + + return true } -func (r *withRetry) BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string) error { - // Ensure the response body is fully read and closed before - // we reconnect, so that we reuse the same TCP connection. +func (r *withRetry) Before(ctx context.Context, request *Request) error { + // If the request context is already canceled there + // is no need to retry. if ctx.Err() != nil { + r.trackPreviousError(ctx.Err()) return ctx.Err() } - klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", retryAfter.Wait, retryAfter.Attempt, url) - if backoff != nil { - backoff.Sleep(retryAfter.Wait) + url := request.URL() + // r.retryAfter represents the retry after parameters calculated + // from the (response, err) tuple from the last attempt, so 'Before' + // can apply these retry after parameters prior to the next attempt. + // 'r.retryAfter == nil' indicates that this is the very first attempt. + if r.retryAfter == nil { + // we do a backoff sleep before the first attempt is made, + // (preserving current behavior). + if request.backoff != nil { + request.backoff.Sleep(request.backoff.CalculateBackoff(url)) + } + return nil + } + + // if we are here, we have made attempt(s) at least once before. + if request.backoff != nil { + delay := request.backoff.CalculateBackoff(url) + if r.retryAfter.Wait > delay { + delay = r.retryAfter.Wait + } + request.backoff.Sleep(delay) + } + + // We are retrying the request that we already send to + // apiserver at least once before. This request should + // also be throttled with the client-internal rate limiter. + if err := request.tryThrottleWithInfo(ctx, r.retryAfter.Reason); err != nil { + r.trackPreviousError(ctx.Err()) + return err } + + klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", r.retryAfter.Wait, r.retryAfter.Attempt, request.URL().String()) return nil } +func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Response, err error) { + // 'After' is invoked immediately after an attempt is made, let's label + // the attempt we have just made as attempt 'N'. + // the current value of r.retryAfter represents the retry after + // parameters calculated from the (response, err) tuple from + // attempt N-1, so r.retryAfter is outdated and should not be + // referred to here. + isRetry := r.retryAfter != nil + r.retryAfter = nil + + // the client finishes a single request after N attempts (1..N) + // - all attempts (1..N) are counted to the rest_client_requests_total + // metric (current behavior). + // - every attempt after the first (2..N) are counted to the + // rest_client_request_retries_total metric. 
+ updateRequestResultMetric(ctx, request, resp, err) + if isRetry { + // this is attempt 2 or later + updateRequestRetryMetric(ctx, request, resp, err) + } + + if request.c.base != nil { + if err != nil { + request.backoff.UpdateBackoff(request.URL(), err, 0) + } else { + request.backoff.UpdateBackoff(request.URL(), err, resp.StatusCode) + } + } +} + +func (r *withRetry) WrapPreviousError(currentErr error) error { + if currentErr == nil || r.previousErr == nil { + return currentErr + } + + // if both previous and current error objects represent the error, + // then there is no need to wrap the previous error. + if currentErr.Error() == r.previousErr.Error() { + return currentErr + } + + previousErr := r.previousErr + // net/http wraps the underlying error with an url.Error, if the + // previous err object is an instance of url.Error, then we can + // unwrap it to get to the inner error object, this is so we can + // avoid error message like: + // Error: Get "http://foo.bar/api/v1": context deadline exceeded - error \ + // from a previous attempt: Error: Get "http://foo.bar/api/v1": EOF + if urlErr, ok := r.previousErr.(*url.Error); ok && urlErr != nil { + if urlErr.Unwrap() != nil { + previousErr = urlErr.Unwrap() + } + } + + return &wrapPreviousError{ + currentErr: currentErr, + previousError: previousErr, + } +} + +type wrapPreviousError struct { + currentErr, previousError error +} + +func (w *wrapPreviousError) Unwrap() error { return w.currentErr } +func (w *wrapPreviousError) Error() string { + return fmt.Sprintf("%s - error from a previous attempt: %s", w.currentErr.Error(), w.previousError.Error()) +} + // checkWait returns true along with a number of seconds if // the server instructed us to wait before retrying. func checkWait(resp *http.Response) (int, bool) { @@ -217,13 +349,21 @@ func readAndCloseResponseBody(resp *http.Response) { defer resp.Body.Close() if resp.ContentLength <= maxBodySlurpSize { - io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize}) + io.Copy(io.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize}) } } func retryAfterResponse() *http.Response { + return retryAfterResponseWithDelay("1") +} + +func retryAfterResponseWithDelay(delay string) *http.Response { + return retryAfterResponseWithCodeAndDelay(http.StatusInternalServerError, delay) +} + +func retryAfterResponseWithCodeAndDelay(code int, delay string) *http.Response { return &http.Response{ - StatusCode: http.StatusInternalServerError, - Header: http.Header{"Retry-After": []string{"1"}}, + StatusCode: code, + Header: http.Header{"Retry-After": []string{delay}}, } } diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go index c8ae0aaf5..270cc4ddb 100644 --- a/vendor/k8s.io/client-go/testing/actions.go +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -30,41 +30,61 @@ import ( ) func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + return NewRootGetActionWithOptions(resource, name, metav1.GetOptions{}) +} + +func NewRootGetActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Name = name + action.GetOptions = opts return action } func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + return NewGetActionWithOptions(resource, namespace, name, metav1.GetOptions{}) +} + +func NewGetActionWithOptions(resource 
schema.GroupVersionResource, namespace, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + return NewGetSubresourceActionWithOptions(resource, namespace, subresource, name, metav1.GetOptions{}) +} + +func NewGetSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, subresource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + return NewRootGetSubresourceActionWithOptions(resource, subresource, name, metav1.GetOptions{}) +} + +func NewRootGetSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Name = name + action.GetOptions = opts return action } @@ -76,6 +96,21 @@ func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVe action.Kind = kind labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewRootListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.ListOptions = opts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} return action } @@ -86,6 +121,21 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio action.Resource = resource action.Kind = kind action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} @@ -93,36 +143,55 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio } func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + return NewRootCreateActionWithOptions(resource, object, metav1.CreateOptions{}) +} + +func 
NewRootCreateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Object = object + action.CreateOptions = opts return action } func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateActionWithOptions(resource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Namespace = namespace action.Object = object + action.CreateOptions = opts return action } func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + return NewRootCreateSubresourceActionWithOptions(resource, name, subresource, object, metav1.CreateOptions{}) +} + +func NewRootCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateSubresourceActionWithOptions(resource, name, subresource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource @@ -130,41 +199,61 @@ func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subr action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + return NewRootUpdateActionWithOptions(resource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Object = object + action.UpdateOptions = opts return action } func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateActionWithOptions(resource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewRootPatchActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}) +} + +func NewRootPatchActionWithOptions(resource 
schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewPatchActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}) +} + +func NewPatchActionWithOptions(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -172,11 +261,16 @@ func NewPatchAction(resource schema.GroupVersionResource, namespace string, name action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewRootPatchSubresourceActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}, subresources...) +} + +func NewRootPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -184,11 +278,16 @@ func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name st action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewPatchSubresourceActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}, subresources...) 
+} + +func NewPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -197,26 +296,38 @@ func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + return NewRootUpdateSubresourceActionWithOptions(resource, subresource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Object = object + action.UpdateOptions = opts return action } + func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateSubresourceActionWithOptions(resource, subresource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } @@ -236,11 +347,16 @@ func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name s } func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + return NewRootDeleteSubresourceActionWithOptions(resource, subresource, name, metav1.DeleteOptions{}) +} + +func NewRootDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Name = name + action.DeleteOptions = opts return action } @@ -261,41 +377,69 @@ func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, } func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + return NewDeleteSubresourceActionWithOptions(resource, subresource, namespace, name, metav1.DeleteOptions{}) +} + +func NewDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.DeleteOptions = opts return action } func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootDeleteCollectionActionWithOptions(resource, metav1.DeleteOptions{}, listOpts) +} + +func NewRootDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, deleteOpts metav1.DeleteOptions, 
listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewDeleteCollectionActionWithOptions(resource, namespace, metav1.DeleteOptions{}, listOpts) +} + +func NewDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, namespace string, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource action.Namespace = namespace - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootWatchActionWithOptions(resource, listOpts) +} + +func NewRootWatchActionWithOptions(resource schema.GroupVersionResource, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -328,10 +472,17 @@ func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fi } func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewWatchActionWithOptions(resource, namespace, listOpts) +} + +func NewWatchActionWithOptions(resource schema.GroupVersionResource, namespace string, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -487,17 +638,23 @@ func (a GenericActionImpl) DeepCopy() Action { type GetActionImpl struct { ActionImpl - Name string + Name string + GetOptions metav1.GetOptions } func (a GetActionImpl) GetName() string { return a.Name } +func (a GetActionImpl) GetGetOptions() metav1.GetOptions { + return a.GetOptions +} + func (a GetActionImpl) DeepCopy() Action { return GetActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), Name: a.Name, + GetOptions: *a.GetOptions.DeepCopy(), } } @@ -506,6 +663,7 @@ type ListActionImpl struct { Kind schema.GroupVersionKind Name string ListRestrictions ListRestrictions + ListOptions metav1.ListOptions } func (a ListActionImpl) GetKind() schema.GroupVersionKind { @@ -516,6 +674,10 @@ func (a ListActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a 
ListActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a ListActionImpl) DeepCopy() Action { return ListActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -525,48 +687,62 @@ func (a ListActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + ListOptions: *a.ListOptions.DeepCopy(), } } type CreateActionImpl struct { ActionImpl - Name string - Object runtime.Object + Name string + Object runtime.Object + CreateOptions metav1.CreateOptions } func (a CreateActionImpl) GetObject() runtime.Object { return a.Object } +func (a CreateActionImpl) GetCreateOptions() metav1.CreateOptions { + return a.CreateOptions +} + func (a CreateActionImpl) DeepCopy() Action { return CreateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + CreateOptions: *a.CreateOptions.DeepCopy(), } } type UpdateActionImpl struct { ActionImpl - Object runtime.Object + Object runtime.Object + UpdateOptions metav1.UpdateOptions } func (a UpdateActionImpl) GetObject() runtime.Object { return a.Object } +func (a UpdateActionImpl) GetUpdateOptions() metav1.UpdateOptions { + return a.UpdateOptions +} + func (a UpdateActionImpl) DeepCopy() Action { return UpdateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + UpdateOptions: *a.UpdateOptions.DeepCopy(), } } type PatchActionImpl struct { ActionImpl - Name string - PatchType types.PatchType - Patch []byte + Name string + PatchType types.PatchType + Patch []byte + PatchOptions metav1.PatchOptions } func (a PatchActionImpl) GetName() string { @@ -581,14 +757,19 @@ func (a PatchActionImpl) GetPatchType() types.PatchType { return a.PatchType } +func (a PatchActionImpl) GetPatchOptions() metav1.PatchOptions { + return a.PatchOptions +} + func (a PatchActionImpl) DeepCopy() Action { patch := make([]byte, len(a.Patch)) copy(patch, a.Patch) return PatchActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - PatchType: a.PatchType, - Patch: patch, + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + PatchOptions: *a.PatchOptions.DeepCopy(), } } @@ -617,12 +798,22 @@ func (a DeleteActionImpl) DeepCopy() Action { type DeleteCollectionActionImpl struct { ActionImpl ListRestrictions ListRestrictions + DeleteOptions metav1.DeleteOptions + ListOptions metav1.ListOptions } func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a DeleteCollectionActionImpl) GetDeleteOptions() metav1.DeleteOptions { + return a.DeleteOptions +} + +func (a DeleteCollectionActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a DeleteCollectionActionImpl) DeepCopy() Action { return DeleteCollectionActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -630,18 +821,25 @@ func (a DeleteCollectionActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + DeleteOptions: *a.DeleteOptions.DeepCopy(), + ListOptions: *a.ListOptions.DeepCopy(), } } type WatchActionImpl struct { ActionImpl WatchRestrictions WatchRestrictions + 
ListOptions metav1.ListOptions } func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { return a.WatchRestrictions } +func (a WatchActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a WatchActionImpl) DeepCopy() Action { return WatchActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -650,6 +848,7 @@ func (a WatchActionImpl) DeepCopy() Action { Fields: a.WatchRestrictions.Fields.DeepCopySelector(), ResourceVersion: a.WatchRestrictions.ResourceVersion, }, + ListOptions: *a.ListOptions.DeepCopy(), } } diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index fe7f0cd32..d288a3aa4 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -19,19 +19,24 @@ package testing import ( "fmt" "reflect" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/yaml" "sort" "strings" "sync" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta/testrestmapper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/managedfields" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" @@ -46,26 +51,32 @@ type ObjectTracker interface { Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. - Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. - Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error // Update updates an existing object in the tracker in the specified namespace. - Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error + + // Patch patches an existing object in the tracker in the specified namespace. + Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error + + // Apply applies an object in the tracker in the specified namespace. + Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. - List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. - Delete(gvr schema.GroupVersionResource, ns, name string) error + Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error // Watch watches objects from the tracker. 
Watch returns a channel // which will push added / modified / deleted object. - Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) + Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) } // ObjectScheme abstracts the implementation of common operations on objects. @@ -76,123 +87,200 @@ type ObjectScheme interface { // ObjectReaction returns a ReactionFunc that applies core.Action to // the given tracker. +// +// If tracker also implements ManagedFieldObjectTracker, then managed fields +// will be handled by the tracker and apply patch actions will be evaluated +// using the field manager and will take field ownership into consideration. +// Without a ManagedFieldObjectTracker, apply patch actions do not consider +// field ownership. +// +// WARNING: There is no server side defaulting, validation, or conversion handled +// by the fake client and subresources are not handled accurately (fields in the +// root resource are not automatically updated when a scale resource is updated, for example). func ObjectReaction(tracker ObjectTracker) ReactionFunc { + reactor := objectTrackerReact{tracker: tracker} return func(action Action) (bool, runtime.Object, error) { - ns := action.GetNamespace() - gvr := action.GetResource() // Here and below we need to switch on implementation types, // not on interfaces, as some interfaces are identical // (e.g. UpdateAction and CreateAction), so if we use them, // updates and creates end up matching the same case branch. switch action := action.(type) { - case ListActionImpl: - obj, err := tracker.List(gvr, action.GetKind(), ns) + obj, err := reactor.List(action) return true, obj, err - case GetActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) + obj, err := reactor.Get(action) return true, obj, err - case CreateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - if action.GetSubresource() == "" { - err = tracker.Create(gvr, action.GetObject(), ns) - } else { - // TODO: Currently we're handling subresource creation as an update - // on the enclosing resource. This works for some subresources but - // might not be generic enough. 
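// The reaction function above dispatches on the concrete action type and forwards
// the typed options to the tracker. A short usage sketch: seed a plain tracker,
// wrap it with ObjectReaction, and replay a typed get action against it. The
// core/v1 scheme registration is an assumption of this example, not something the
// diff introduces:
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	codecs := serializer.NewCodecFactory(scheme)

	tracker := clienttesting.NewObjectTracker(scheme, codecs.UniversalDecoder())
	_ = tracker.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "web"}})

	react := clienttesting.ObjectReaction(tracker)
	gvr := corev1.SchemeGroupVersion.WithResource("pods")
	handled, obj, err := react(clienttesting.NewGetActionWithOptions(gvr, "ns", "web", metav1.GetOptions{}))
	if err != nil {
		panic(err)
	}
	fmt.Println(handled, obj.(*corev1.Pod).Name) // true web
}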
- err = tracker.Update(gvr, action.GetObject(), ns) - } - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Create(action) return true, obj, err - case UpdateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - err = tracker.Update(gvr, action.GetObject(), ns) - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Update(action) return true, obj, err - case DeleteActionImpl: - err := tracker.Delete(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err - } - return true, nil, nil - + obj, err := reactor.Delete(action) + return true, obj, err case PatchActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err + if action.GetPatchType() == types.ApplyPatchType { + obj, err := reactor.Apply(action) + return true, obj, err } + obj, err := reactor.Patch(action) + return true, obj, err + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} - old, err := json.Marshal(obj) - if err != nil { - return true, nil, err - } +type objectTrackerReact struct { + tracker ObjectTracker +} - // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields - // in obj that are removed by patch are cleared - value := reflect.ValueOf(obj) - value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) - - switch action.GetPatchType() { - case types.JSONPatchType: - patch, err := jsonpatch.DecodePatch(action.GetPatch()) - if err != nil { - return true, nil, err - } - modified, err := patch.Apply(old) - if err != nil { - return true, nil, err - } - - if err = json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.MergePatchType: - modified, err := jsonpatch.MergePatch(old, action.GetPatch()) - if err != nil { - return true, nil, err - } - - if err := json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.StrategicMergePatchType: - mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) - if err != nil { - return true, nil, err - } - if err = json.Unmarshal(mergedByte, obj); err != nil { - return true, nil, err - } - default: - return true, nil, fmt.Errorf("PatchType is not supported") - } +func (o objectTrackerReact) List(action ListActionImpl) (runtime.Object, error) { + return o.tracker.List(action.GetResource(), action.GetKind(), action.GetNamespace(), action.ListOptions) +} - if err = tracker.Update(gvr, obj, ns); err != nil { - return true, nil, err - } +func (o objectTrackerReact) Get(action GetActionImpl) (runtime.Object, error) { + return o.tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName(), action.GetOptions) +} + +func (o objectTrackerReact) Create(action CreateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } + if action.GetSubresource() == "" { + err = o.tracker.Create(gvr, action.GetObject(), ns, action.CreateOptions) + if err != nil { + return nil, err + } + } else { + oldObj, getOldObjErr := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if getOldObjErr != nil { + return nil, getOldObjErr + } + // Check whether the existing historical object type is the same as the current operation object type that needs to be 
updated, and if it is the same, perform the update operation. + if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = o.tracker.Update(gvr, action.GetObject(), ns, metav1.UpdateOptions{ + DryRun: action.CreateOptions.DryRun, + FieldManager: action.CreateOptions.FieldManager, + FieldValidation: action.CreateOptions.FieldValidation, + }) + } else { + // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. + return action.GetObject(), nil + } + } + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Update(action UpdateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } - return true, obj, nil + err = o.tracker.Update(gvr, action.GetObject(), ns, action.UpdateOptions) + if err != nil { + return nil, err + } - default: - return false, nil, fmt.Errorf("no reaction implemented for %s", action) + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Delete(action DeleteActionImpl) (runtime.Object, error) { + err := o.tracker.Delete(action.GetResource(), action.GetNamespace(), action.GetName(), action.DeleteOptions) + return nil, err +} + +func (o objectTrackerReact) Apply(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil { + return nil, err + } + err := o.tracker.Apply(gvr, patchObj, ns, action.PatchOptions) + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Patch(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return nil, err 
+ } + default: + return nil, fmt.Errorf("PatchType %s is not supported", action.GetPatchType()) } + + if err = o.tracker.Patch(gvr, obj, ns, action.PatchOptions); err != nil { + return nil, err + } + + return obj, nil } type tracker struct { @@ -221,7 +309,11 @@ func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracke } } -func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. @@ -260,7 +352,12 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK return list.DeepCopyObject(), nil } -func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + t.lock.Lock() defer t.lock.Unlock() @@ -273,8 +370,12 @@ func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Inter return fakewatcher, nil } -func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { - errNotFound := errors.NewNotFound(gvr.GroupResource(), name) +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + errNotFound := apierrors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() @@ -295,7 +396,7 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { - return nil, &errors.StatusError{ErrStatus: *status} + return nil, &apierrors.StatusError{ErrStatus: *status} } } @@ -342,11 +443,70 @@ func (t *tracker) Add(obj runtime.Object) error { return nil } -func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } return t.add(gvr, obj, ns, false) } -func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, opts ...metav1.PatchOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, patchedObject, ns, true) +} + +func (t *tracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { 
+ return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + obj, err := t.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + old, err := json.Marshal(obj) + if err != nil { + return err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + // For backward compatibility with behavior 1.30 and earlier, continue to handle apply + // via strategic merge patch (clients may use fake.NewClientset and ManagedFieldObjectTracker + // for full field manager support). + patch, err := json.Marshal(applyConfiguration) + if err != nil { + return err + } + mergedByte, err := strategicpatch.StrategicMergePatch(old, patch, obj) + if err != nil { + return err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return err + } + return t.add(gvr, obj, ns, true) } @@ -388,7 +548,7 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) - return errors.NewBadRequest(msg) + return apierrors.NewBadRequest(msg) } _, ok := t.objects[gvr] @@ -406,12 +566,12 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st t.objects[gvr][namespacedName] = obj return nil } - return errors.NewAlreadyExists(gr, newMeta.GetName()) + return apierrors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. - return errors.NewNotFound(gr, newMeta.GetName()) + return apierrors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj @@ -441,19 +601,23 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { return nil } -func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } delete(objs, namespacedName) @@ -463,6 +627,203 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error return nil } +type managedFieldObjectTracker struct { + ObjectTracker + scheme ObjectScheme + objectConverter runtime.ObjectConvertor + mapper meta.RESTMapper + typeConverter managedfields.TypeConverter +} + +var _ ObjectTracker = &managedFieldObjectTracker{} + +// NewFieldManagedObjectTracker returns an ObjectTracker that can be used to keep track +// of objects and managed fields for the fake clientset. Mostly useful for unit tests. 
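// NewFieldManagedObjectTracker wraps the plain tracker so that Create, Update,
// Patch, and Apply go through a field manager. A construction sketch; it assumes
// the deduced type converter from k8s.io/apimachinery/pkg/util/managedfields,
// which infers field sets from the objects rather than from OpenAPI schemas:
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/util/managedfields"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	codecs := serializer.NewCodecFactory(scheme)

	tracker := clienttesting.NewFieldManagedObjectTracker(
		scheme,
		codecs.UniversalDecoder(),
		managedfields.NewDeducedTypeConverter(),
	)

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "web"}}
	gvr := corev1.SchemeGroupVersion.WithResource("pods")
	if err := tracker.Create(gvr, pod, "ns", metav1.CreateOptions{FieldManager: "example"}); err != nil {
		panic(err)
	}
}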
+func NewFieldManagedObjectTracker(scheme *runtime.Scheme, decoder runtime.Decoder, typeConverter managedfields.TypeConverter) ObjectTracker { + return &managedFieldObjectTracker{ + ObjectTracker: NewObjectTracker(scheme, decoder), + scheme: scheme, + objectConverter: scheme, + mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme), + typeConverter: typeConverter, + } +} + +func (t *managedFieldObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.CreateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objType, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + // Stamp GVK + apiVersion, kind := gvk.ToAPIVersionAndKind() + objType.SetAPIVersion(apiVersion) + objType.SetKind(kind) + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + liveObject, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(liveObject, obj, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.UpdateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, obj, opts.FieldManager) + if err != nil { + return err + } + + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(patchedObject) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, patchedObject, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Patch(gvr, objWithManagedFields, ns, vopts...) 
+} + +func (t *managedFieldObjectTracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + exists := true + liveObject, err := t.ObjectTracker.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + exists = false + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + force := false + if opts.Force != nil { + force = *opts.Force + } + objWithManagedFields, err := mgr.Apply(liveObject, applyConfiguration, opts.FieldManager, force) + if err != nil { + return err + } + + if !exists { + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, metav1.CreateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } else { + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, metav1.UpdateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } +} + +func (t *managedFieldObjectTracker) fieldManagerFor(gvk schema.GroupVersionKind) (*managedfields.FieldManager, error) { + return managedfields.NewDefaultFieldManager( + t.typeConverter, + t.objectConverter, + &objectDefaulter{}, + t.scheme, + gvk, + gvk.GroupVersion(), + "", + nil) +} + +// objectDefaulter implements runtime.Defaulter, but it actually +// does nothing. +type objectDefaulter struct{} + +func (d *objectDefaulter) Default(_ runtime.Object) {} + // filterByNamespace returns all objects in the collection that // match provided namespace. Empty namespace matches // non-namespaced objects. @@ -569,3 +930,76 @@ func resourceCovers(resource string, action Action) bool { return false } + +// assertOptionalSingleArgument returns an error if there is more than one variadic argument. +// Otherwise, it returns the first variadic argument, or zero value if there are no arguments. +func assertOptionalSingleArgument[T any](arguments []T) (T, error) { + var a T + switch len(arguments) { + case 0: + return a, nil + case 1: + return arguments[0], nil + default: + return a, fmt.Errorf("expected only one option argument but got %d", len(arguments)) + } +} + +type TypeResolver interface { + Type(openAPIName string) typed.ParseableType +} + +type TypeConverter struct { + Scheme *runtime.Scheme + TypeResolver TypeResolver +} + +func (tc TypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + name, err := tc.openAPIName(gvk) + if err != nil { + return nil, err + } + t := tc.TypeResolver.Type(name) + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return t.FromStructured(obj, opts...) 
+ } +} + +func (tc TypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + vu := value.AsValue().Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func (tc TypeConverter) openAPIName(kind schema.GroupVersionKind) (string, error) { + example, err := tc.Scheme.New(kind) + if err != nil { + return "", err + } + rtype := reflect.TypeOf(example).Elem() + name := friendlyName(rtype.PkgPath() + "." + rtype.Name()) + return name, nil +} + +// This is a copy of openapi.friendlyName. +// TODO: consider introducing a shared version of this function in apimachinery. +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS index 3e05d309b..c4ea6463d 100644 --- a/vendor/k8s.io/client-go/tools/auth/OWNERS +++ b/vendor/k8s.io/client-go/tools/auth/OWNERS @@ -1,9 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- sig-auth-authenticators-approvers + - sig-auth-authenticators-approvers reviewers: -- sig-auth-authenticators-reviewers + - sig-auth-authenticators-reviewers labels: -- sig/auth - + - sig/auth diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go index ac30271fe..5a256c1c3 100644 --- a/vendor/k8s.io/client-go/tools/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -32,7 +32,7 @@ Having a defined format allows: The file format is json, marshalled from a struct authcfg.Info. -Clinet libraries in other languages should use the same format. +Client libraries in other languages should use the same format. It is not intended to store general preferences, such as default namespace, output options, etc. CLIs (such as kubectl) and UIs should @@ -65,7 +65,6 @@ package auth // TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated. 
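To make the name reversal in friendlyName concrete, a tiny standalone sketch that copies the helper shown above and prints the OpenAPI-style name for an illustrative Go type path:

```go
package main

import (
	"fmt"
	"strings"
)

// friendlyName (copied from the vendored helper above) reverses the first,
// dotted path segment so "k8s.io/..." becomes the OpenAPI-style "io.k8s....".
func friendlyName(name string) string {
	nameParts := strings.Split(name, "/")
	if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
		parts := strings.Split(nameParts[0], ".")
		for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
			parts[i], parts[j] = parts[j], parts[i]
		}
		nameParts[0] = strings.Join(parts, ".")
	}
	return strings.Join(nameParts, ".")
}

func main() {
	fmt.Println(friendlyName("k8s.io/api/core/v1.Pod")) // io.k8s.api.core.v1.Pod
}
```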
import ( "encoding/json" - "io/ioutil" "os" restclient "k8s.io/client-go/rest" @@ -90,7 +89,7 @@ func LoadFromFile(path string) (*Info, error) { if _, err := os.Stat(path); os.IsNotExist(err) { return nil, err } - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index c4824d6bc..921ac2fa0 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -1,38 +1,28 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- caesarxuchao -- liggitt -- ncdc + - thockin + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - ncdc reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- davidopp -- pmorie -- janetkuo -- justinsb -- soltysh -- jsafrane -- dims -- hongchaodeng -- krousey -- xiang90 -- ingvagabund -- resouer -- jessfraz -- mfojtik -- sdminonne -- ncdc + - thockin + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - justinsb + - soltysh + - jsafrane + - dims + - ingvagabund + - ncdc +emeritus_approvers: + - lavalamp diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 6c9c5df40..e523a6652 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -17,6 +17,7 @@ limitations under the License. package cache import ( + "errors" "sync" "time" @@ -49,14 +50,21 @@ type Config struct { Process ProcessFunc // ObjectType is an example object of the type this controller is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // expected to handle. ObjectType runtime.Object + // ObjectDescription is the description to use when logging type-specific information about this controller. + ObjectDescription string + // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + // ShouldResync is periodically used by the reflector to determine // whether to Resync the Queue. If ShouldResync is `nil` or // returns true, it means the reflector should proceed with the @@ -83,7 +91,7 @@ type Config struct { type ShouldResyncFunc func() bool // ProcessFunc processes a single object. 
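For the LoadFromFile change above (the file is now read with os.ReadFile), a hedged sketch of loading the JSON auth Info file and merging it into a rest.Config; the file path and host are purely illustrative, and MergeWithConfig is assumed to be available on Info as in upstream clientauth:

```go
package main

import (
	"fmt"

	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/auth"
)

func main() {
	// Example path only; the file is the JSON-marshalled authcfg.Info format
	// described in the package comment above.
	info, err := auth.LoadFromFile("/var/run/kube-auth/authcfg.json")
	if err != nil {
		panic(err)
	}
	cfg, err := info.MergeWithConfig(restclient.Config{Host: "https://example.invalid"})
	if err != nil {
		panic(err)
	}
	fmt.Println("configured host:", cfg.Host)
}
```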
-type ProcessFunc func(obj interface{}) error +type ProcessFunc func(obj interface{}, isInInitialList bool) error // `*controller` implements Controller type controller struct { @@ -130,15 +138,19 @@ func (c *controller) Run(stopCh <-chan struct{}) { <-stopCh c.config.Queue.Close() }() - r := NewReflector( + r := NewReflectorWithOptions( c.config.ListerWatcher, c.config.ObjectType, c.config.Queue, - c.config.FullResyncPeriod, + ReflectorOptions{ + ResyncPeriod: c.config.FullResyncPeriod, + MinWatchTimeout: c.config.MinWatchTimeout, + TypeDescription: c.config.ObjectDescription, + Clock: c.clock, + }, ) r.ShouldResync = c.config.ShouldResync r.WatchListPageSize = c.config.WatchListPageSize - r.clock = c.clock if c.config.WatchErrorHandler != nil { r.watchErrorHandler = c.config.WatchErrorHandler } @@ -210,7 +222,7 @@ func (c *controller) processLoop() { // happen if the watch is closed and misses the delete event and we don't // notice the deletion until the subsequent re-list. type ResourceEventHandler interface { - OnAdd(obj interface{}) + OnAdd(obj interface{}, isInInitialList bool) OnUpdate(oldObj, newObj interface{}) OnDelete(obj interface{}) } @@ -219,6 +231,9 @@ type ResourceEventHandler interface { // as few of the notification functions as you want while still implementing // ResourceEventHandler. This adapter does not remove the prohibition against // modifying the objects. +// +// See ResourceEventHandlerDetailedFuncs if your use needs to propagate +// HasSynced. type ResourceEventHandlerFuncs struct { AddFunc func(obj interface{}) UpdateFunc func(oldObj, newObj interface{}) @@ -226,7 +241,7 @@ type ResourceEventHandlerFuncs struct { } // OnAdd calls AddFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) { if r.AddFunc != nil { r.AddFunc(obj) } @@ -246,6 +261,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { } } +// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs +// except its AddFunc accepts the isInInitialList parameter, for propagating +// HasSynced. +type ResourceEventHandlerDetailedFuncs struct { + AddFunc func(obj interface{}, isInInitialList bool) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) { + if r.AddFunc != nil { + r.AddFunc(obj, isInInitialList) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + // FilteringResourceEventHandler applies the provided filter to all events coming // in, ensuring the appropriate nested handler method is invoked. 
An object // that starts passing the filter after an update is considered an add, and an @@ -257,11 +302,11 @@ type FilteringResourceEventHandler struct { } // OnAdd calls the nested handler only if the filter succeeds -func (r FilteringResourceEventHandler) OnAdd(obj interface{}) { +func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { if !r.FilterFunc(obj) { return } - r.Handler.OnAdd(obj) + r.Handler.OnAdd(obj, isInInitialList) } // OnUpdate ensures the proper handler is called depending on whether the filter matches @@ -272,7 +317,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { case newer && older: r.Handler.OnUpdate(oldObj, newObj) case newer && !older: - r.Handler.OnAdd(newObj) + r.Handler.OnAdd(newObj, false) case !newer && older: r.Handler.OnDelete(oldObj) default: @@ -298,6 +343,68 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { return MetaNamespaceKeyFunc(obj) } +// DeletionHandlingObjectToName checks for +// DeletedFinalStateUnknown objects before calling +// ObjectToName. +func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) { + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return ParseObjectName(d.Key) + } + return ObjectToName(obj) +} + +// InformerOptions configure a Reflector. +type InformerOptions struct { + // ListerWatcher implements List and Watch functions for the source of the resource + // the informer will be informing about. + ListerWatcher ListerWatcher + + // ObjectType is an object of the type that informer is expected to receive. + ObjectType runtime.Object + + // Handler defines functions that should called on object mutations. + Handler ResourceEventHandler + + // ResyncPeriod is the underlying Reflector's resync period. If non-zero, the store + // is re-synced with that frequency - Modify events are delivered even if objects + // didn't change. + // This is useful for synchronizing objects that configure external resources + // (e.g. configure cloud provider functionalities). + // Optional - if unset, store resyncing is not happening periodically. + ResyncPeriod time.Duration + + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + + // Indexers, if set, are the indexers for the received objects to optimize + // certain queries. + // Optional - if unset no indexes are maintained. + Indexers Indexers + + // Transform function, if set, will be called on all objects before they will be + // put into the Store and corresponding Add/Modify/Delete handlers will be invoked + // for them. + // Optional - if unset no additional transforming is happening. + Transform TransformFunc +} + +// NewInformerWithOptions returns a Store and a controller for populating the store +// while also providing event notifications. You should only used the returned +// Store for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. 
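Since OnAdd now carries an isInInitialList flag, handlers that care about it can use the new detailed variant defined above; a small hedged sketch (the logging is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// newDetailedHandler shows the new OnAdd signature: the extra bool reports
// whether the object was delivered as part of the informer's initial list
// (i.e. before HasSynced becomes true) or by a later watch event.
func newDetailedHandler() cache.ResourceEventHandler {
	return cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			if isInInitialList {
				fmt.Println("seen during initial sync:", obj)
				return
			}
			fmt.Println("added after sync:", obj)
		},
		UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("updated:", newObj) },
		DeleteFunc: func(obj interface{}) { fmt.Println("deleted:", obj) },
	}
}

func main() {
	_ = newDetailedHandler() // wire into an informer's Handler in real code
}
```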
+func NewInformerWithOptions(options InformerOptions) (Store, Controller) { + var clientState Store + if options.Indexers == nil { + clientState = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + } else { + clientState = NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers) + } + return clientState, newInformer(clientState, options) +} + // NewInformer returns a Store and a controller for populating the store // while also providing event notifications. You should only used the returned // Store for Get/List operations; Add/Modify/Deletes will cause the event @@ -312,6 +419,8 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { // long as possible (until the upstream source closes the watch or times out, // or you stop the controller). // - h is the object you want notifications sent to. +// +// Deprecated: Use NewInformerWithOptions instead. func NewInformer( lw ListerWatcher, objType runtime.Object, @@ -321,7 +430,13 @@ func NewInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + } + return clientState, newInformer(clientState, options) } // NewIndexerInformer returns an Indexer and a Controller for populating the index @@ -339,6 +454,8 @@ func NewInformer( // or you stop the controller). // - h is the object you want notifications sent to. // - indexers is the indexer for the received object type. +// +// Deprecated: Use NewInformerWithOptions instead. func NewIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -349,27 +466,25 @@ func NewIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + } + return clientState, newInformer(clientState, options) } -// TransformFunc allows for transforming an object before it will be processed -// and put into the controller cache and before the corresponding handlers will -// be called on it. -// TransformFunc (similarly to ResourceEventHandler functions) should be able -// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown -// -// The most common usage pattern is to clean-up some parts of the object to -// reduce component memory usage if a given component doesn't care about them. -// given controller doesn't care for them -type TransformFunc func(interface{}) (interface{}, error) - // NewTransformingInformer returns a Store and a controller for populating // the store while also providing event notifications. You should only used // the returned Store for Get/List operations; Add/Modify/Deletes will cause // the event notifications to be faulty. // The given transform function will be called on all objects before they will -// put put into the Store and corresponding Add/Modify/Delete handlers will -// be invokved for them. +// put into the Store and corresponding Add/Modify/Delete handlers will +// be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. 
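Given that the older constructors are now deprecated in favor of NewInformerWithOptions, here is a hedged end-to-end sketch of the replacement path. The kubeconfig lookup, the choice of pods as the resource, and the transform body are illustrative assumptions, not part of this change:

```go
package main

import (
	"fmt"
	"os"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes KUBECONFIG points at a reachable cluster; illustrative only.
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", corev1.NamespaceAll, fields.Everything())

	store, controller := cache.NewInformerWithOptions(cache.InformerOptions{
		ListerWatcher: lw,
		ObjectType:    &corev1.Pod{},
		ResyncPeriod:  30 * time.Second,
		Indexers:      cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		// Transform runs before objects enter the store; dropping managed
		// fields to save memory is the typical use.
		Transform: func(obj interface{}) (interface{}, error) {
			if pod, ok := obj.(*corev1.Pod); ok {
				pod.ManagedFields = nil
			}
			return obj, nil
		},
		Handler: cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) { fmt.Println("add:", obj.(*corev1.Pod).Name) },
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	_ = store // use store.List() / store.GetByKey() for reads
	select {}
}
```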
func NewTransformingInformer( lw ListerWatcher, objType runtime.Object, @@ -380,7 +495,14 @@ func NewTransformingInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Transform: transformer, + } + return clientState, newInformer(clientState, options) } // NewTransformingIndexerInformer returns an Indexer and a controller for @@ -390,6 +512,8 @@ func NewTransformingInformer( // The given transform function will be called on all objects before they will // be put into the Index and corresponding Add/Modify/Delete handlers will // be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. func NewTransformingIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -401,78 +525,82 @@ func NewTransformingIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + Transform: transformer, + } + return clientState, newInformer(clientState, options) +} + +// Multiplexes updates in the form of a list of Deltas into a Store, and informs +// a given handler of events OnUpdate, OnAdd, OnDelete +func processDeltas( + // Object which receives event notifications from the given deltas + handler ResourceEventHandler, + clientState Store, + deltas Deltas, + isInInitialList bool, +) error { + // from oldest to newest + for _, d := range deltas { + obj := d.Object + + switch d.Type { + case Sync, Replaced, Added, Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + handler.OnUpdate(old, obj) + } else { + if err := clientState.Add(obj); err != nil { + return err + } + handler.OnAdd(obj, isInInitialList) + } + case Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + handler.OnDelete(obj) + } + } + return nil } // newInformer returns a controller for populating the store while also // providing event notifications. // // Parameters -// - lw is list and watch functions for the source of the resource you want to -// be informed of. -// - objType is an object of the type that you expect to receive. -// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// - h is the object you want notifications sent to. // - clientState is the store you want to populate -func newInformer( - lw ListerWatcher, - objType runtime.Object, - resyncPeriod time.Duration, - h ResourceEventHandler, - clientState Store, - transformer TransformFunc, -) Controller { +// - options contain the options to configure the controller +func newInformer(clientState Store, options InformerOptions) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. 
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, + Transformer: options.Transform, }) cfg := &Config{ Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, + ListerWatcher: options.ListerWatcher, + ObjectType: options.ObjectType, + FullResyncPeriod: options.ResyncPeriod, + MinWatchTimeout: options.MinWatchTimeout, RetryOnError: false, - Process: func(obj interface{}) error { - // from oldest to newest - for _, d := range obj.(Deltas) { - obj := d.Object - if transformer != nil { - var err error - obj, err = transformer(obj) - if err != nil { - return err - } - } - - switch d.Type { - case Sync, Replaced, Added, Updated: - if old, exists, err := clientState.Get(obj); err == nil && exists { - if err := clientState.Update(obj); err != nil { - return err - } - h.OnUpdate(old, obj) - } else { - if err := clientState.Add(obj); err != nil { - return err - } - h.OnAdd(obj) - } - case Deleted: - if err := clientState.Delete(obj); err != nil { - return err - } - h.OnDelete(obj) - } + Process: func(obj interface{}, isInInitialList bool) error { + if deltas, ok := obj.(Deltas); ok { + return processDeltas(options.Handler, clientState, deltas, isInInitialList) } - return nil + return errors.New("object given as Process argument is not Deltas") }, } return New(cfg) diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 0c13a41f0..ce74dfb6f 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -51,6 +51,10 @@ type DeltaFIFOOptions struct { // When true, `Replaced` events will be sent for items passed to a Replace() call. // When false, `Sync` events will be sent instead. EmitDeltaTypeReplaced bool + + // If set, will be called for objects before enqueueing them. Please + // see the comment on TransformFunc for details. + Transformer TransformFunc } // DeltaFIFO is like FIFO, but differs in two ways. One is that the @@ -129,8 +133,29 @@ type DeltaFIFO struct { // emitDeltaTypeReplaced is whether to emit the Replaced or Sync // DeltaType when Replace() is called (to preserve backwards compat). emitDeltaTypeReplaced bool + + // Called with every object if non-nil. + transformer TransformFunc } +// TransformFunc allows for transforming an object before it will be processed. +// +// The most common usage pattern is to clean-up some parts of the object to +// reduce component memory usage if a given component doesn't care about them. +// +// New in v1.27: TransformFunc sees the object before any other actor, and it +// is now safe to mutate the object in place instead of making a copy. +// +// It's recommended for the TransformFunc to be idempotent. +// It MUST be idempotent if objects already present in the cache are passed to +// the Replace() to avoid re-mutating them. Default informers do not pass +// existing objects to Replace though. +// +// Note that TransformFunc is called while inserting objects into the +// notification queue and is therefore extremely performance sensitive; please +// do not do anything that will take a long time. 
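The idempotency requirement called out in the comment above matters because objects already in the cache may be handed back to Replace(). A hedged sketch of a transform written defensively; stripping managed fields is just the common example, and the package name is illustrative:

```go
package transforms

import (
	"k8s.io/apimachinery/pkg/api/meta"
)

// StripManagedFields is safe to run more than once on the same object:
// it only mutates when there is still something to remove.
func StripManagedFields(obj interface{}) (interface{}, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		// Values without ObjectMeta pass through unchanged.
		return obj, nil
	}
	if accessor.GetManagedFields() != nil {
		accessor.SetManagedFields(nil)
	}
	return obj, nil
}
```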
+type TransformFunc func(interface{}) (interface{}, error) + // DeltaType is the type of a change (addition, deletion, etc) type DeltaType string @@ -227,6 +252,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { knownObjects: opts.KnownObjects, emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced, + transformer: opts.Transformer, } f.cond.L = &f.lock return f @@ -271,6 +297,10 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { func (f *DeltaFIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *DeltaFIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -407,10 +437,41 @@ func isDeletionDup(a, b *Delta) *Delta { // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + return f.queueActionInternalLocked(actionType, actionType, obj) +} + +// queueActionInternalLocked appends to the delta list for the object. +// The actionType is emitted and must honor emitDeltaTypeReplaced. +// The internalActionType is only used within this function and must +// ignore emitDeltaTypeReplaced. +// Caller must lock first. +func (f *DeltaFIFO) queueActionInternalLocked(actionType, internalActionType DeltaType, obj interface{}) error { id, err := f.KeyOf(obj) if err != nil { return KeyError{obj, err} } + + // Every object comes through this code path once, so this is a good + // place to call the transform func. + // + // If obj is a DeletedFinalStateUnknown tombstone or the action is a Sync, + // then the object have already gone through the transformer. + // + // If the objects already present in the cache are passed to Replace(), + // the transformer must be idempotent to avoid re-mutating them, + // or coordinate with all readers from the cache to avoid data races. + // Default informers do not pass existing objects to Replace. + if f.transformer != nil { + _, isTombstone := obj.(DeletedFinalStateUnknown) + if !isTombstone && internalActionType != Sync { + var err error + obj, err = f.transformer(obj) + if err != nil { + return err + } + } + } + oldDeltas := f.items[id] newDeltas := append(oldDeltas, Delta{actionType, obj}) newDeltas = dedupDeltas(newDeltas) @@ -526,6 +587,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] depth := len(f.queue) @@ -551,7 +613,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"}) defer trace.LogIfLong(100 * time.Millisecond) } - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err @@ -566,12 +628,11 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { // using the Sync or Replace DeltaType and then (2) it does some deletions. // In particular: for every pre-existing key K that is not the key of // an object in `list` there is the effect of -// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object -// of K. If `f.knownObjects == nil` then the pre-existing keys are -// those in `f.items` and the current object of K is the `.Newest()` -// of the Deltas associated with K. 
Otherwise the pre-existing keys -// are those listed by `f.knownObjects` and the current object of K is -// what `f.knownObjects.GetByKey(K)` returns. +// `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known +// object of K. The pre-existing keys are those in the union set of the keys in +// `f.items` and `f.knownObjects` (if not nil). The last known object for key K is +// the one present in the last delta in `f.items`. If there is no delta for K +// in `f.items`, it is the object in `f.knownObjects` func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { f.lock.Lock() defer f.lock.Unlock() @@ -590,56 +651,28 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { return KeyError{item, err} } keys.Insert(key) - if err := f.queueActionLocked(action, item); err != nil { + if err := f.queueActionInternalLocked(action, Replaced, item); err != nil { return fmt.Errorf("couldn't enqueue object: %v", err) } } - if f.knownObjects == nil { - // Do deletion detection against our own list. - queuedDeletions := 0 - for k, oldItem := range f.items { - if keys.Has(k) { - continue - } - // Delete pre-existing items not in the new list. - // This could happen if watch deletion event was missed while - // disconnected from apiserver. - var deletedObj interface{} - if n := oldItem.Newest(); n != nil { - deletedObj = n.Object - } - queuedDeletions++ - if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { - return err - } - } - - if !f.populated { - f.populated = true - // While there shouldn't be any queued deletions in the initial - // population of the queue, it's better to be on the safe side. - f.initialPopulationCount = keys.Len() + queuedDeletions - } - - return nil - } - - // Detect deletions not already in the queue. - knownKeys := f.knownObjects.ListKeys() + // Do deletion detection against objects in the queue queuedDeletions := 0 - for _, k := range knownKeys { + for k, oldItem := range f.items { if keys.Has(k) { continue } - - deletedObj, exists, err := f.knownObjects.GetByKey(k) - if err != nil { - deletedObj = nil - klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) - } else if !exists { - deletedObj = nil - klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + // Delete pre-existing items not in the new list. + // This could happen if watch deletion event was missed while + // disconnected from apiserver. 
+ var deletedObj interface{} + if n := oldItem.Newest(); n != nil { + deletedObj = n.Object + + // if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object + if d, ok := deletedObj.(DeletedFinalStateUnknown); ok { + deletedObj = d.Obj + } } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -647,6 +680,32 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { } } + if f.knownObjects != nil { + // Detect deletions for objects not present in the queue, but present in KnownObjects + knownKeys := f.knownObjects.ListKeys() + for _, k := range knownKeys { + if keys.Has(k) { + continue + } + if len(f.items[k]) > 0 { + continue + } + + deletedObj, exists, err := f.knownObjects.GetByKey(k) + if err != nil { + deletedObj = nil + klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + } else if !exists { + deletedObj = nil + klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + } + queuedDeletions++ + if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { + return err + } + } + } + if !f.populated { f.populated = true f.initialPopulationCount = keys.Len() + queuedDeletions diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index 3f272b80b..813916ebf 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,7 +20,6 @@ import ( "sync" "time" - "k8s.io/klog/v2" "k8s.io/utils/clock" ) @@ -100,7 +99,6 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj) c.cacheStorage.Delete(key) return nil, false } diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 8f3313783..dd13c4ea7 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -25,7 +25,7 @@ import ( // PopProcessFunc is passed to Pop() method of Queue interface. // It is supposed to process the accumulator popped from the queue. -type PopProcessFunc func(interface{}) error +type PopProcessFunc func(obj interface{}, isInInitialList bool) error // ErrRequeue may be returned by a PopProcessFunc to safely requeue // the current item. The value of Err will be returned from Pop. @@ -82,9 +82,12 @@ type Queue interface { // Pop is helper function for popping from Queue. // WARNING: Do NOT use this function in non-test code to avoid races // unless you really really really really know what you are doing. +// +// NOTE: This function is deprecated and may be removed in the future without +// additional warning. 
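Because relist-detected deletions are delivered as DeletedFinalStateUnknown tombstones (as in the Replace logic above), delete handlers typically unwrap them before use; a small hedged sketch using Pod as an arbitrary example type:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// podFromDeleteEvent unwraps the tombstone a DeleteFunc may receive when the
// deletion was only noticed during a relist rather than via a watch event.
func podFromDeleteEvent(obj interface{}) (*corev1.Pod, bool) {
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		obj = tombstone.Obj
	}
	pod, ok := obj.(*corev1.Pod)
	return pod, ok
}

func main() {
	if pod, ok := podFromDeleteEvent(cache.DeletedFinalStateUnknown{Key: "ns/name", Obj: &corev1.Pod{}}); ok {
		fmt.Println("deleted pod object recovered:", pod != nil)
	}
}
```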
func Pop(queue Queue) interface{} { var result interface{} - queue.Pop(func(obj interface{}) error { + queue.Pop(func(obj interface{}, isInInitialList bool) error { result = obj return nil }) @@ -149,6 +152,10 @@ func (f *FIFO) Close() { func (f *FIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() + return f.hasSynced_locked() +} + +func (f *FIFO) hasSynced_locked() bool { return f.populated && f.initialPopulationCount == 0 } @@ -287,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { f.cond.Wait() } + isInInitialList := !f.hasSynced_locked() id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { @@ -298,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) - err := process(item) + err := process(item, isInInitialList) if e, ok := err.(ErrRequeue); ok { f.addIfNotPresent(id, item) err = e.Err diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go index 15d46f0b6..c5819fb6f 100644 --- a/vendor/k8s.io/client-go/tools/cache/index.go +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -47,11 +47,10 @@ type Indexer interface { // ByIndex returns the stored objects whose set of indexed values // for the named index includes the given indexed value ByIndex(indexName, indexedValue string) ([]interface{}, error) - // GetIndexer return the indexers + // GetIndexers return the indexers GetIndexers() Indexers - // AddIndexers adds more indexers to this store. If you call this after you already have data - // in the store, the results are undefined. + // AddIndexers adds more indexers to this store. This supports adding indexes after the store already has items. AddIndexers(newIndexers Indexers) error } diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index 7885d2f76..a60f44943 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -30,7 +30,7 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) -// ListAll calls appendFn with each value retrieved from store which matches the selector. +// ListAll lists items in the store matching the given selector, calling appendFn on each one. func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { @@ -51,26 +51,12 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } -// ListAllByNamespace used to list items belongs to namespace from Indexer. +// ListAllByNamespace lists items in the given namespace in the store matching the given selector, +// calling appendFn on each one. +// If a blank namespace (NamespaceAll) is specified, this delegates to ListAll(). func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { - selectAll := selector.Empty() if namespace == metav1.NamespaceAll { - for _, m := range indexer.List() { - if selectAll { - // Avoid computing labels of the objects to speed up common flows - // of listing all objects. 
- appendFn(m) - continue - } - metadata, err := meta.Accessor(m) - if err != nil { - return err - } - if selector.Matches(labels.Set(metadata.GetLabels())) { - appendFn(m) - } - } - return nil + return ListAll(indexer, selector, appendFn) } items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) @@ -89,6 +75,8 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec } return nil } + + selectAll := selector.Empty() for _, m := range items { if selectAll { // Avoid computing labels of the objects to speed up common flows diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index 10b7e6512..f5708ffeb 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -36,6 +36,10 @@ type Lister interface { // Watcher is any object that knows how to start a watch on a resource. type Watcher interface { // Watch should begin a watch at the specified version. + // + // If Watch returns an error, it should handle its own cleanup, including + // but not limited to calling Stop() on the watch, if one was constructed. + // This allows the caller to ignore the watch, if the error is non-nil. Watch(options metav1.ListOptions) (watch.Interface, error) } diff --git a/vendor/k8s.io/client-go/tools/cache/object-names.go b/vendor/k8s.io/client-go/tools/cache/object-names.go new file mode 100644 index 000000000..aa8dbb199 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/object-names.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/apimachinery/pkg/types" +) + +// ObjectName is a reference to an object of some implicit kind +type ObjectName struct { + Namespace string + Name string +} + +// NewObjectName constructs a new one +func NewObjectName(namespace, name string) ObjectName { + return ObjectName{Namespace: namespace, Name: name} +} + +// Parts is the inverse of the constructor +func (objName ObjectName) Parts() (namespace, name string) { + return objName.Namespace, objName.Name +} + +// String returns the standard string encoding, +// which is designed to match the historical behavior of MetaNamespaceKeyFunc. +// Note this behavior is different from the String method of types.NamespacedName. 
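To illustrate the encoding difference the ObjectName.String comment points out, a tiny sketch comparing it with types.NamespacedName (the names are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/cache"
)

func main() {
	fmt.Println(cache.NewObjectName("kube-system", "coredns").String()) // "kube-system/coredns"
	fmt.Println(cache.NewObjectName("", "some-node").String())          // "some-node" (no leading slash)

	// types.NamespacedName keeps the separator even when the namespace is empty.
	fmt.Println(types.NamespacedName{Name: "some-node"}.String()) // "/some-node"

	// ParseObjectName is the inverse of String for both forms.
	objName, err := cache.ParseObjectName("kube-system/coredns")
	fmt.Println(objName.Namespace, objName.Name, err) // "kube-system" "coredns" <nil>
}
```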
+func (objName ObjectName) String() string { + if len(objName.Namespace) > 0 { + return objName.Namespace + "/" + objName.Name + } + return objName.Name +} + +// ParseObjectName tries to parse the standard encoding +func ParseObjectName(str string) (ObjectName, error) { + var objName ObjectName + var err error + objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str) + return objName, err +} + +// NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName +func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName { + return NewObjectName(nn.Namespace, nn.Name) +} + +// AsNamespacedName rebrands as a NamespacedName +func (objName ObjectName) AsNamespacedName() types.NamespacedName { + return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name} +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 0708a6e8b..5e7dd5740 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -23,6 +23,7 @@ import ( "io" "math/rand" "reflect" + "strings" "sync" "time" @@ -37,24 +38,32 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" + "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "k8s.io/utils/trace" ) const defaultExpectedTypeName = "" +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + defaultMinWatchTimeout = 5 * time.Minute +) + // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { // name identifies this reflector. By default it will be a file:line if possible. name string - // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the // stringification of expectedType otherwise. It is for display // only, and should not be used for parsing or comparison. - expectedTypeName string + typeDescription string // An example object of the type we expect to place in the store. // Only the type needs to be right, except that when that is // `unstructured.Unstructured` the object's `"apiVersion"` and @@ -66,15 +75,11 @@ type Reflector struct { store Store // listerWatcher is used to perform lists and watches. listerWatcher ListerWatcher - // backoff manages backoff of ListWatch backoffManager wait.BackoffManager - // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. - initConnBackoffManager wait.BackoffManager - - resyncPeriod time.Duration - // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked - ShouldResync func() bool + resyncPeriod time.Duration + // minWatchTimeout defines the minimum timeout for watch requests. + minWatchTimeout time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -89,6 +94,8 @@ type Reflector struct { isLastSyncResourceVersionUnavailable bool // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion lastSyncResourceVersionMutex sync.RWMutex + // Called whenever the ListAndWatch drops the connection with an error. 
+ watchErrorHandler WatchErrorHandler // WatchListPageSize is the requested chunk size of initial and resync watch lists. // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") @@ -97,8 +104,21 @@ type Reflector struct { // etcd, which is significantly less efficient and may lead to serious performance and // scalability problems. WatchListPageSize int64 - // Called whenever the ListAndWatch drops the connection with an error. - watchErrorHandler WatchErrorHandler + // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked + ShouldResync func() bool + // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. + MaxInternalErrorRetryDuration time.Duration + // UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server. + // Streaming has the primary advantage of using fewer server's resources to fetch data. + // + // The old behaviour establishes a LIST request which gets data in chunks. + // Paginated list is less efficient and depending on the actual size of objects + // might result in an increased memory consumption of the APIServer. + // + // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details + // + // TODO(#115478): Consider making reflector.UseWatchList a private field. Since we implemented "api streaming" on the etcd storage layer it should work. + UseWatchList *bool } // ResourceVersionUpdater is an interface that allows store implementation to @@ -129,22 +149,16 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. - klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case err == io.EOF: // watch closed normally case err == io.ErrUnexpectedEOF: - klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err) default: - utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err)) } } -var ( - // We try to spread the load on apiserver by setting timeouts for - // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. - minWatchTimeout = 5 * time.Minute -) - // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { @@ -153,7 +167,44 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa return indexer, reflector } -// NewReflector creates a new Reflector object which will keep the +// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack +// that is outside this package. 
See NewReflectorWithOptions for further information. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod}) +} + +// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further +// information. +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod}) +} + +// ReflectorOptions configures a Reflector. +type ReflectorOptions struct { + // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line + // in the call stack that is outside this package. + Name string + + // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted + // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is + // "". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields + // are set, the type description is the string encoding of those. Otherwise, the type description is set to the + // go type of expectedType.. + TypeDescription string + + // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0 + // (do not resync). + ResyncPeriod time.Duration + + // MinWatchTimeout, if non-zero, defines the minimum timeout for watch requests send to kube-apiserver. + // However, values lower than 5m will not be honored to avoid negative performance impact on controlplane. + MinWatchTimeout time.Duration + + // Clock allows tests to control time. If unset defaults to clock.RealClock{} + Clock clock.Clock +} + +// NewReflectorWithOptions creates a new Reflector object which will keep the // given store up to date with the server's contents for the given // resource. Reflector promises to only put things in the store that // have the type of expectedType, unless expectedType is nil. If @@ -163,49 +214,84 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa // "yes". This enables you to use reflectors to periodically process // everything as well as incrementally processing the things that // change. 
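A hedged sketch of wiring a Reflector through the new options struct; the ListerWatcher is assumed to be built elsewhere and the durations are illustrative (note the constructor below clamps MinWatchTimeout to at least the 5m default):

```go
package reflectors

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// startPodReflector mirrors what NewReflector/NewNamedReflector now do under
// the hood, but sets the extra options directly. lw is assumed to be built
// elsewhere, e.g. with cache.NewListWatchFromClient.
func startPodReflector(lw cache.ListerWatcher, stopCh <-chan struct{}) cache.Store {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	r := cache.NewReflectorWithOptions(lw, &corev1.Pod{}, store, cache.ReflectorOptions{
		Name:            "pod-reflector",
		TypeDescription: "*v1.Pod",
		ResyncPeriod:    10 * time.Minute,
		// Anything below 5m is ignored; larger values stretch the watch timeout.
		MinWatchTimeout: 30 * time.Minute,
	})
	go r.Run(stopCh)
	return store
}
```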
-func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) -} - -// NewNamedReflector same as NewReflector, but with a specified name for logging -func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - realClock := &clock.RealClock{} +func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector { + reflectorClock := options.Clock + if reflectorClock == nil { + reflectorClock = clock.RealClock{} + } + minWatchTimeout := defaultMinWatchTimeout + if options.MinWatchTimeout > defaultMinWatchTimeout { + minWatchTimeout = options.MinWatchTimeout + } r := &Reflector{ - name: name, - listerWatcher: lw, - store: store, + name: options.Name, + resyncPeriod: options.ResyncPeriod, + minWatchTimeout: minWatchTimeout, + typeDescription: options.TypeDescription, + listerWatcher: lw, + store: store, // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. - backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), - resyncPeriod: resyncPeriod, - clock: realClock, - watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock), + clock: reflectorClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + expectedType: reflect.TypeOf(expectedType), + } + + if r.name == "" { + r.name = naming.GetNameFromCallsite(internalPackages...) + } + + if r.typeDescription == "" { + r.typeDescription = getTypeDescriptionFromObject(expectedType) + } + + if r.expectedGVK == nil { + r.expectedGVK = getExpectedGVKFromObject(expectedType) } - r.setExpectedType(expectedType) + + // don't overwrite UseWatchList if already set + // because the higher layers (e.g. storage/cacher) disabled it on purpose + if r.UseWatchList == nil { + r.UseWatchList = ptr.To(clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient)) + } + return r } -func (r *Reflector) setExpectedType(expectedType interface{}) { - r.expectedType = reflect.TypeOf(expectedType) - if r.expectedType == nil { - r.expectedTypeName = defaultExpectedTypeName - return +func getTypeDescriptionFromObject(expectedType interface{}) string { + if expectedType == nil { + return defaultExpectedTypeName } - r.expectedTypeName = r.expectedType.String() + reflectDescription := reflect.TypeOf(expectedType).String() - if obj, ok := expectedType.(*unstructured.Unstructured); ok { - // Use gvk to check that watch event objects are of the desired type. 
- gvk := obj.GroupVersionKind() - if gvk.Empty() { - klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name) - return - } - r.expectedGVK = &gvk - r.expectedTypeName = gvk.String() + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return reflectDescription + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return reflectDescription + } + + return gvk.String() +} + +func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind { + obj, ok := expectedType.(*unstructured.Unstructured) + if !ok { + return nil + } + + gvk := obj.GroupVersionKind() + if gvk.Empty() { + return nil } + + return &gvk } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -216,13 +302,13 @@ var internalPackages = []string{"client-go/tools/cache/"} // objects and subsequent deltas. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) wait.BackoffUntil(func() { if err := r.ListAndWatch(stopCh); err != nil { r.watchErrorHandler(r, err) } }, r.backoffManager, true, stopCh) - klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name) + klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name) } var ( @@ -231,7 +317,7 @@ var ( // Used to indicate that watching stopped because of a signal from the stop // channel passed in from a client of the reflector. - errorStopRequested = errors.New("Stop requested") + errorStopRequested = errors.New("stop requested") ) // resyncChan returns a channel which will receive something when a resync is @@ -252,194 +338,150 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name) - var resourceVersion string - - options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()} - - if err := func() error { - initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name}) - defer initTrace.LogIfLong(10 * time.Second) - var list runtime.Object - var paginatedResult bool - var err error - listCh := make(chan struct{}, 1) - panicCh := make(chan interface{}, 1) - go func() { - defer func() { - if r := recover(); r != nil { - panicCh <- r - } - }() - // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first - // list request will return the full response. - pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { - return r.listerWatcher.List(opts) - })) - switch { - case r.WatchListPageSize != 0: - pager.PageSize = r.WatchListPageSize - case r.paginatedResult: - // We got a paginated result initially. Assume this resource and server honor - // paging requests (i.e. watch cache is probably disabled) and leave the default - // pager size set. - case options.ResourceVersion != "" && options.ResourceVersion != "0": - // User didn't explicitly request pagination. 
- // - // With ResourceVersion != "", we have a possibility to list from watch cache, - // but we do that (for ResourceVersion != "0") only if Limit is unset. - // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly - // switch off pagination to force listing from watch cache (if enabled). - // With the existing semantic of RV (result is at least as fresh as provided RV), - // this is correct and doesn't lead to going back in time. - // - // We also don't turn off pagination for ResourceVersion="0", since watch cache - // is ignoring Limit in that case anyway, and if watch cache is not enabled - // we don't introduce regression. - pager.PageSize = 0 - } + klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name) + var err error + var w watch.Interface + useWatchList := ptr.Deref(r.UseWatchList, false) + fallbackToList := !useWatchList - list, paginatedResult, err = pager.List(context.Background(), options) - if isExpiredError(err) || isTooLargeResourceVersionError(err) { - r.setIsLastSyncResourceVersionUnavailable(true) - // Retry immediately if the resource version used to list is unavailable. - // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on - // continuation pages, but the pager might not be enabled, the full list might fail because the - // resource version it is listing at is expired or the cache may not yet be synced to the provided - // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure - // the reflector makes forward progress. - list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) - } - close(listCh) - }() - select { - case <-stopCh: + if useWatchList { + w, err = r.watchList(stopCh) + if w == nil && err == nil { + // stopCh was closed return nil - case r := <-panicCh: - panic(r) - case <-listCh: } - initTrace.Step("Objects listed", trace.Field{"error", err}) if err != nil { - klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err) - return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err) - } - - // We check if the list was paginated and if so set the paginatedResult based on that. - // However, we want to do that only for the initial list (which is the only case - // when we set ResourceVersion="0"). The reasoning behind it is that later, in some - // situations we may force listing directly from etcd (by setting ResourceVersion="") - // which will return paginated result, even if watch cache is enabled. However, in - // that case, we still want to prefer sending requests to watch cache if possible. - // - // Paginated result returned for request with ResourceVersion="0" mean that watch - // cache is disabled and there are a lot of objects of a given type. In such case, - // there is no need to prefer listing from watch cache. - if options.ResourceVersion == "0" && paginatedResult { - r.paginatedResult = true + klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err) + fallbackToList = true + // ensure that we won't accidentally pass some garbage down the watch. 
+ w = nil } + } - r.setIsLastSyncResourceVersionUnavailable(false) // list was successful - listMetaInterface, err := meta.ListAccessor(list) + if fallbackToList { + err = r.list(stopCh) if err != nil { - return fmt.Errorf("unable to understand list result %#v: %v", list, err) + return err } - resourceVersion = listMetaInterface.GetResourceVersion() - initTrace.Step("Resource version extracted") - items, err := meta.ExtractList(list) - if err != nil { - return fmt.Errorf("unable to understand list result %#v (%v)", list, err) + } + + klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) + return r.watchWithResync(w, stopCh) +} + +// startResync periodically calls r.store.Resync() method. +// Note that this method is blocking and should be +// called in a separate goroutine. +func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) { + resyncCh, cleanup := r.resyncChan() + defer func() { + cleanup() // Call the last one written into cleanup + }() + for { + select { + case <-resyncCh: + case <-stopCh: + return + case <-cancelCh: + return } - initTrace.Step("Objects extracted") - if err := r.syncWith(items, resourceVersion); err != nil { - return fmt.Errorf("unable to sync list result: %v", err) + if r.ShouldResync == nil || r.ShouldResync() { + klog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err + return + } } - initTrace.Step("SyncWith done") - r.setLastSyncResourceVersion(resourceVersion) - initTrace.Step("Resource version updated") - return nil - }(); err != nil { - return err + cleanup() + resyncCh, cleanup = r.resyncChan() } +} +// watchWithResync runs watch with startResync in the background. +func (r *Reflector) watchWithResync(w watch.Interface, stopCh <-chan struct{}) error { resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) - go func() { - resyncCh, cleanup := r.resyncChan() - defer func() { - cleanup() // Call the last one written into cleanup - }() - for { - select { - case <-resyncCh: - case <-stopCh: - return - case <-cancelCh: - return - } - if r.ShouldResync == nil || r.ShouldResync() { - klog.V(4).Infof("%s: forcing resync", r.name) - if err := r.store.Resync(); err != nil { - resyncerrc <- err - return - } - } - cleanup() - resyncCh, cleanup = r.resyncChan() - } - }() + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + +// watch simply starts a watch request with the server. +func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { + var err error + retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock) for { // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { case <-stopCh: + // we can only end up here when the stopCh + // was closed after a successful watchlist or list request + if w != nil { + w.Stop() + } return nil default: } - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) - options = metav1.ListOptions{ - ResourceVersion: resourceVersion, - // We want to avoid situations of hanging watchers. Stop any wachers that do not - // receive any events within the timeout window. - TimeoutSeconds: &timeoutSeconds, - // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. 
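Note: the rewritten ListAndWatch above now tries the streaming watch-list first and only falls back to the classic LIST+WATCH when that fails; callers drive it exactly as before. A minimal sketch of driving this loop directly (resource and resync period are illustrative; kube-mgmt itself goes through shared informers rather than a bare Reflector):

package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// runConfigMapReflector mirrors ConfigMaps into a local store until stopCh closes.
func runConfigMapReflector(cs *kubernetes.Clientset, stopCh <-chan struct{}) cache.Store {
	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "configmaps", "", fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	r := cache.NewReflector(lw, &v1.ConfigMap{}, store, 30*time.Second)
	// Run blocks; internally it decides whether the initial sync is a
	// watch-list stream or a plain LIST, then keeps watching with backoff.
	go r.Run(stopCh)
	return store
}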
- // Reflector doesn't assume bookmarks are returned at all (if the server do not support - // watch bookmarks, it will ignore this field). - AllowWatchBookmarks: true, - } - // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent start := r.clock.Now() - w, err := r.listerWatcher.Watch(options) - if err != nil { - // If this is "connection refused" error, it means that most likely apiserver is not responsive. - // It doesn't make sense to re-list all objects because most likely we will be able to restart - // watch where we ended. - // If that's the case begin exponentially backing off and resend watch request. - // Do the same for "429" errors. - if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { - <-r.initConnBackoffManager.Backoff().C() - continue + + if w == nil { + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: r.LastSyncResourceVersion(), + // We want to avoid situations of hanging watchers. Stop any watchers that do not + // receive any events within the timeout window. + TimeoutSeconds: &timeoutSeconds, + // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks. + // Reflector doesn't assume bookmarks are returned at all (if the server do not support + // watch bookmarks, it will ignore this field). + AllowWatchBookmarks: true, + } + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err) + select { + case <-stopCh: + return nil + case <-r.backoffManager.Backoff().C(): + continue + } + } + return err } - return err } - if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil { - if err != errorStopRequested { + err = handleWatch(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, + r.clock, resyncerrc, stopCh) + // Ensure that watch will not be reused across iterations. + w.Stop() + w = nil + retry.After(err) + if err != nil { + if !errors.Is(err, errorStopRequested) { switch { case isExpiredError(err): // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already // has a semantic that it returns data at least as fresh as provided RV. // So first try to LIST with setting RV to resource version of last observed object. 
- klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err) case apierrors.IsTooManyRequests(err): - klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName) - <-r.initConnBackoffManager.Backoff().C() + klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription) + select { + case <-stopCh: + return nil + case <-r.backoffManager.Backoff().C(): + continue + } + case apierrors.IsInternalError(err) && retry.ShouldRetry(): + klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err) continue default: - klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err) } } return nil @@ -447,6 +489,226 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } } +// list simply lists all items and records a resource version obtained from the server at the moment of the call. +// the resource version can be used for further progress notification (aka. watch). +func (r *Reflector) list(stopCh <-chan struct{}) error { + var resourceVersion string + options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()} + + initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + var list runtime.Object + var paginatedResult bool + var err error + listCh := make(chan struct{}, 1) + panicCh := make(chan interface{}, 1) + go func() { + defer func() { + if r := recover(); r != nil { + panicCh <- r + } + }() + // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first + // list request will return the full response. + pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) { + return r.listerWatcher.List(opts) + })) + switch { + case r.WatchListPageSize != 0: + pager.PageSize = r.WatchListPageSize + case r.paginatedResult: + // We got a paginated result initially. Assume this resource and server honor + // paging requests (i.e. watch cache is probably disabled) and leave the default + // pager size set. + case options.ResourceVersion != "" && options.ResourceVersion != "0": + // User didn't explicitly request pagination. + // + // With ResourceVersion != "", we have a possibility to list from watch cache, + // but we do that (for ResourceVersion != "0") only if Limit is unset. + // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly + // switch off pagination to force listing from watch cache (if enabled). + // With the existing semantic of RV (result is at least as fresh as provided RV), + // this is correct and doesn't lead to going back in time. + // + // We also don't turn off pagination for ResourceVersion="0", since watch cache + // is ignoring Limit in that case anyway, and if watch cache is not enabled + // we don't introduce regression. + pager.PageSize = 0 + } + + list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options) + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + r.setIsLastSyncResourceVersionUnavailable(true) + // Retry immediately if the resource version used to list is unavailable. 
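The extracted list() path shown in this hunk still funnels every request through the client-go pager. A rough standalone sketch of that same helper, with an illustrative resource and page size:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

// listAllPods pages through Pods 500 at a time; the bool reports whether the
// server actually paginated the response (i.e. the watch cache was bypassed).
func listAllPods(ctx context.Context, cs *kubernetes.Clientset) (runtime.Object, bool, error) {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return cs.CoreV1().Pods(metav1.NamespaceAll).List(ctx, opts)
	}))
	p.PageSize = 500
	return p.List(ctx, metav1.ListOptions{ResourceVersion: "0"})
}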
+ // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on + // continuation pages, but the pager might not be enabled, the full list might fail because the + // resource version it is listing at is expired or the cache may not yet be synced to the provided + // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure + // the reflector makes forward progress. + list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}) + } + close(listCh) + }() + select { + case <-stopCh: + return nil + case r := <-panicCh: + panic(r) + case <-listCh: + } + initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err}) + if err != nil { + klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err) + return fmt.Errorf("failed to list %v: %w", r.typeDescription, err) + } + + // We check if the list was paginated and if so set the paginatedResult based on that. + // However, we want to do that only for the initial list (which is the only case + // when we set ResourceVersion="0"). The reasoning behind it is that later, in some + // situations we may force listing directly from etcd (by setting ResourceVersion="") + // which will return paginated result, even if watch cache is enabled. However, in + // that case, we still want to prefer sending requests to watch cache if possible. + // + // Paginated result returned for request with ResourceVersion="0" mean that watch + // cache is disabled and there are a lot of objects of a given type. In such case, + // there is no need to prefer listing from watch cache. + if options.ResourceVersion == "0" && paginatedResult { + r.paginatedResult = true + } + + r.setIsLastSyncResourceVersionUnavailable(false) // list was successful + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v: %v", list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + initTrace.Step("Resource version extracted") + items, err := meta.ExtractListWithAlloc(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v (%v)", list, err) + } + initTrace.Step("Objects extracted") + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("unable to sync list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + initTrace.Step("Resource version updated") + return nil +} + +// watchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a consistent stream with the server. +// That means the returned data is consistent, as if, served directly from etcd via a quorum read. +// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion. +// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. 
+// +// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan) +// Establishes a stream with the server at the provided resource version. +// To establish the initial state the server begins with synthetic "Added" events. +// It ends with a synthetic "Bookmark" event containing the provided or newer resource version. +// After receiving a "Bookmark" event the reflector is considered to be synchronized. +// It replaces its internal store with the collected items and +// reuses the current watch requests for getting further events. +func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { + var w watch.Interface + var err error + var temporaryStore Store + var resourceVersion string + // TODO(#115478): see if this function could be turned + // into a method and see if error handling + // could be unified with the r.watch method + isErrorRetriableWithSideEffectsFn := func(err error) bool { + if canRetry := isWatchErrorRetriable(err); canRetry { + klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err) + <-r.backoffManager.Backoff().C() + return true + } + if isExpiredError(err) || isTooLargeResourceVersionError(err) { + // we tried to re-establish a watch request but the provided RV + // has either expired or it is greater than the server knows about. + // In that case we reset the RV and + // try to get a consistent snapshot from the watch cache (case 1) + r.setIsLastSyncResourceVersionUnavailable(true) + return true + } + return false + } + + initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name}) + defer initTrace.LogIfLong(10 * time.Second) + for { + select { + case <-stopCh: + return nil, nil + default: + } + + resourceVersion = "" + lastKnownRV := r.rewatchResourceVersion() + temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + // TODO(#115478): large "list", slow clients, slow network, p&f + // might slow down streaming and eventually fail. + // maybe in such a case we should retry with an increased timeout? + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + options := metav1.ListOptions{ + ResourceVersion: lastKnownRV, + AllowWatchBookmarks: true, + SendInitialEvents: pointer.Bool(true), + ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan, + TimeoutSeconds: &timeoutSeconds, + } + start := r.clock.Now() + + w, err = r.listerWatcher.Watch(options) + if err != nil { + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + watchListBookmarkReceived, err := handleListWatch(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + func(rv string) { resourceVersion = rv }, + r.clock, make(chan error), stopCh) + if err != nil { + w.Stop() // stop and retry with clean state + if errors.Is(err, errorStopRequested) { + return nil, nil + } + if isErrorRetriableWithSideEffectsFn(err) { + continue + } + return nil, err + } + if watchListBookmarkReceived { + break + } + } + // We successfully got initial state from watch-list confirmed by the + // "k8s.io/initial-events-end" bookmark. + initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) + r.setIsLastSyncResourceVersionUnavailable(false) + + // we utilize the temporaryStore to ensure independence from the current store implementation. 
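Concretely, the stream that watchList opens corresponds to a request like the sketch below (typed client, illustrative resource and timeout); servers without the WatchList feature reject or ignore SendInitialEvents, which is what triggers the fallback path above:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/pointer"
)

// openWatchList starts a watch that first replays the current state as Added
// events and then marks the end of that replay with a Bookmark event carrying
// the metav1.InitialEventsAnnotationKey annotation.
func openWatchList(ctx context.Context, cs *kubernetes.Clientset, namespace string) (watch.Interface, error) {
	timeout := int64(300)
	return cs.CoreV1().ConfigMaps(namespace).Watch(ctx, metav1.ListOptions{
		ResourceVersion:      "", // case 1 above: most recent, consistent state
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		SendInitialEvents:    pointer.Bool(true),
		AllowWatchBookmarks:  true,
		TimeoutSeconds:       &timeout,
	})
}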
+ // as of today, the store is implemented as a queue and will be drained by the higher-level + // component as soon as it finishes replacing the content. + checkWatchListDataConsistencyIfRequested(wait.ContextForChannel(stopCh), r.name, resourceVersion, wrapListFuncWithContext(r.listerWatcher.List), temporaryStore.List) + + if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %w", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + + return w, nil +} + // syncWith replaces the store's items with the given list. func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { found := make([]interface{}, 0, len(items)) @@ -456,85 +718,158 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err return r.store.Replace(found, resourceVersion) } -// watchHandler watches w and keeps *resourceVersion up to date. -func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { - eventCount := 0 +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. If successful, the watcher will be left open after receiving the +// initial set of objects, to allow watching for future events. +func handleListWatch( + start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + exitOnWatchListBookmarkReceived := true + return handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) +} - // Stopping the watcher should be idempotent and if we return from this function there's no way - // we're coming back in with the same watch interface. - defer w.Stop() +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. The watcher will always be stopped on exit. +func handleWatch( + start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) error { + exitOnWatchListBookmarkReceived := false + _, err := handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) + return err +} + +// handleAnyWatch consumes events from w, updates the Store, and records the last +// seen ResourceVersion, to allow continuing from that ResourceVersion on retry. +// If exitOnWatchListBookmarkReceived is true, the watch events will be consumed +// until a bookmark event is received with the WatchList annotation present. +// Returns true (watchListBookmarkReceived) if the WatchList bookmark was +// received, even if exitOnWatchListBookmarkReceived is false. +// The watcher will always be stopped, unless exitOnWatchListBookmarkReceived is +// true and watchListBookmarkReceived is true. 
This allows the same watch stream +// to be re-used by the caller to continue watching for new events. +func handleAnyWatch(start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + exitOnWatchListBookmarkReceived bool, + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + watchListBookmarkReceived := false + eventCount := 0 + initialEventsEndBookmarkWarningTicker := newInitialEventsEndBookmarkTicker(name, clock, start, exitOnWatchListBookmarkReceived) + defer initialEventsEndBookmarkWarningTicker.Stop() loop: for { select { case <-stopCh: - return errorStopRequested - case err := <-errc: - return err + return watchListBookmarkReceived, errorStopRequested + case err := <-errCh: + return watchListBookmarkReceived, err case event, ok := <-w.ResultChan(): if !ok { break loop } if event.Type == watch.Error { - return apierrors.FromObject(event.Object) + return watchListBookmarkReceived, apierrors.FromObject(event.Object) } - if r.expectedType != nil { - if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a { - utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a)) + if expectedType != nil { + if e, a := expectedType, reflect.TypeOf(event.Object); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", name, e, a)) continue } } - if r.expectedGVK != nil { - if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { - utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a)) + if expectedGVK != nil { + if e, a := *expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", name, e, a)) continue } } meta, err := meta.Accessor(event.Object) if err != nil { - utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) continue } - newResourceVersion := meta.GetResourceVersion() + resourceVersion := meta.GetResourceVersion() switch event.Type { case watch.Added: - err := r.store.Add(event.Object) + err := store.Add(event.Object) if err != nil { - utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err)) + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", name, event.Object, err)) } case watch.Modified: - err := r.store.Update(event.Object) + err := store.Update(event.Object) if err != nil { - utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err)) + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", name, event.Object, err)) } case watch.Deleted: // TODO: Will any consumers need access to the "last known // state", which is passed in event.Object? If so, may need // to change this. 
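The switch in this hunk is the heart of handleAnyWatch. Stripped of the reflector bookkeeping, the consumption pattern is the familiar watch loop; a sketch, with the store callbacks left abstract:

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// drain consumes one watch stream, returning once the initial-events-end
// bookmark is seen or the channel closes.
func drain(w watch.Interface, upsert, remove func(obj interface{})) error {
	for ev := range w.ResultChan() {
		switch ev.Type {
		case watch.Error:
			return apierrors.FromObject(ev.Object)
		case watch.Added, watch.Modified:
			upsert(ev.Object)
		case watch.Deleted:
			remove(ev.Object)
		case watch.Bookmark:
			// Only the resourceVersion advances; the watch-list replay is done
			// once the bookmark carries the initial-events-end annotation.
			if m, err := meta.Accessor(ev.Object); err == nil &&
				m.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
				return nil
			}
		}
	}
	return nil
}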
- err := r.store.Delete(event.Object) + err := store.Delete(event.Object) if err != nil { - utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) + utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", name, event.Object, err)) } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + watchListBookmarkReceived = true + } default: - utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) } - *resourceVersion = newResourceVersion - r.setLastSyncResourceVersion(newResourceVersion) - if rvu, ok := r.store.(ResourceVersionUpdater); ok { - rvu.UpdateResourceVersion(newResourceVersion) + setLastSyncResourceVersion(resourceVersion) + if rvu, ok := store.(ResourceVersionUpdater); ok { + rvu.UpdateResourceVersion(resourceVersion) } eventCount++ + if exitOnWatchListBookmarkReceived && watchListBookmarkReceived { + watchDuration := clock.Since(start) + klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) + return watchListBookmarkReceived, nil + } + initialEventsEndBookmarkWarningTicker.observeLastEventTimeStamp(clock.Now()) + case <-initialEventsEndBookmarkWarningTicker.C(): + initialEventsEndBookmarkWarningTicker.warnIfExpired() } } - watchDuration := r.clock.Since(start) + watchDuration := clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { - return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) + return watchListBookmarkReceived, fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) } - klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount) - return nil + klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount) + return watchListBookmarkReceived, nil } // LastSyncResourceVersion is the resource version observed when last sync with the underlying store @@ -574,6 +909,18 @@ func (r *Reflector) relistResourceVersion() string { return r.lastSyncResourceVersion } +// rewatchResourceVersion determines the resource version the reflector should start streaming from. +func (r *Reflector) rewatchResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + if r.isLastSyncResourceVersionUnavailable { + // initial stream should return data at the most recent resource version. + // the returned data must be consistent i.e. as if served from etcd via a quorum read + return "" + } + return r.lastSyncResourceVersion +} + // setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned // "expired" or "too large resource version" error. 
func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { @@ -612,5 +959,117 @@ func isTooLargeResourceVersionError(err error) bool { return true } } + + // Matches the message returned by api server before 1.17.0 + if strings.Contains(apierr.Status().Message, "Too large resource version") { + return true + } + + return false +} + +// isWatchErrorRetriable determines if it is safe to retry +// a watch error retrieved from the server. +func isWatchErrorRetriable(err error) bool { + // If this is "connection refused" error, it means that most likely apiserver is not responsive. + // It doesn't make sense to re-list all objects because most likely we will be able to restart + // watch where we ended. + // If that's the case begin exponentially backing off and resend watch request. + // Do the same for "429" errors. + if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) { + return true + } return false } + +// wrapListFuncWithContext simply wraps ListFunction into another function that accepts a context and ignores it. +func wrapListFuncWithContext(listFn ListFunc) func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + return func(_ context.Context, options metav1.ListOptions) (runtime.Object, error) { + return listFn(options) + } +} + +// initialEventsEndBookmarkTicker a ticker that produces a warning if the bookmark event +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note: +// The methods exposed by this type are not thread-safe. +type initialEventsEndBookmarkTicker struct { + clock.Ticker + clock clock.Clock + name string + + watchStart time.Time + tickInterval time.Duration + lastEventObserveTime time.Time +} + +// newInitialEventsEndBookmarkTicker returns a noop ticker if exitOnInitialEventsEndBookmarkRequested is false. +// Otherwise, it returns a ticker that exposes a method producing a warning if the bookmark event, +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note that the caller controls whether to call t.C() and t.Stop(). +// +// In practice, the reflector exits the watchHandler as soon as the bookmark event is received and calls the t.C() method. 
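The isExpiredError / isWatchErrorRetriable classification above is also what a caller can hook into from the outside via the informer's SetWatchErrorHandler. A sketch of a custom handler, assuming the default behaviour is otherwise kept (function name and log levels are illustrative):

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// quietExpiry downgrades routine relists to a debug log; it must be installed
// before the informer is started or SetWatchErrorHandler returns an error.
func quietExpiry(informer cache.SharedIndexInformer) error {
	return informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
		switch {
		case apierrors.IsResourceExpired(err) || apierrors.IsGone(err):
			klog.V(4).Infof("watch expired, reflector will relist: %v", err)
		case apierrors.IsTooManyRequests(err):
			klog.V(2).Infof("apiserver throttling watches (429), backing off: %v", err)
		default:
			klog.Warningf("watch failed: %v", err)
		}
	})
}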
+func newInitialEventsEndBookmarkTicker(name string, c clock.Clock, watchStart time.Time, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + return newInitialEventsEndBookmarkTickerInternal(name, c, watchStart, 10*time.Second, exitOnWatchListBookmarkReceived) +} + +func newInitialEventsEndBookmarkTickerInternal(name string, c clock.Clock, watchStart time.Time, tickInterval time.Duration, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + clockWithTicker, ok := c.(clock.WithTicker) + if !ok || !exitOnWatchListBookmarkReceived { + if exitOnWatchListBookmarkReceived { + klog.Warningf("clock does not support WithTicker interface but exitOnInitialEventsEndBookmark was requested") + } + return &initialEventsEndBookmarkTicker{ + Ticker: &noopTicker{}, + } + } + + return &initialEventsEndBookmarkTicker{ + Ticker: clockWithTicker.NewTicker(tickInterval), + clock: c, + name: name, + watchStart: watchStart, + tickInterval: tickInterval, + } +} + +func (t *initialEventsEndBookmarkTicker) observeLastEventTimeStamp(lastEventObserveTime time.Time) { + t.lastEventObserveTime = lastEventObserveTime +} + +func (t *initialEventsEndBookmarkTicker) warnIfExpired() { + if err := t.produceWarningIfExpired(); err != nil { + klog.Warning(err) + } +} + +// produceWarningIfExpired returns an error that represents a warning when +// the time elapsed since the last received event exceeds the tickInterval. +// +// Note that this method should be called when t.C() yields a value. +func (t *initialEventsEndBookmarkTicker) produceWarningIfExpired() error { + if _, ok := t.Ticker.(*noopTicker); ok { + return nil /*noop ticker*/ + } + if t.lastEventObserveTime.IsZero() { + return fmt.Errorf("%s: awaiting required bookmark event for initial events stream, no events received for %v", t.name, t.clock.Since(t.watchStart)) + } + elapsedTime := t.clock.Now().Sub(t.lastEventObserveTime) + hasBookmarkTimerExpired := elapsedTime >= t.tickInterval + + if !hasBookmarkTimerExpired { + return nil + } + return fmt.Errorf("%s: hasn't received required bookmark event marking the end of initial events stream, received last event %v ago", t.name, elapsedTime) +} + +var _ clock.Ticker = &noopTicker{} + +// TODO(#115478): move to k8s/utils repo +type noopTicker struct{} + +func (t *noopTicker) C() <-chan time.Time { return nil } + +func (t *noopTicker) Stop() {} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go new file mode 100644 index 000000000..a7e0d9c43 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -0,0 +1,43 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/consistencydetector" +) + +// checkWatchListDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call against etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { + if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { + return + } + // for informers we pass an empty ListOptions because + // listFn might be wrapped for filtering during informer construction. + consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn) +} diff --git a/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go new file mode 100644 index 000000000..8201fb153 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "k8s.io/utils/clock" + "time" +) + +type RetryWithDeadline interface { + After(error) + ShouldRetry() bool +} + +type retryWithDeadlineImpl struct { + firstErrorTime time.Time + lastErrorTime time.Time + maxRetryDuration time.Duration + minResetPeriod time.Duration + isRetryable func(error) bool + clock clock.Clock +} + +func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline { + return &retryWithDeadlineImpl{ + firstErrorTime: time.Time{}, + lastErrorTime: time.Time{}, + maxRetryDuration: maxRetryDuration, + minResetPeriod: minResetPeriod, + isRetryable: isRetryable, + clock: clock, + } +} + +func (r *retryWithDeadlineImpl) reset() { + r.firstErrorTime = time.Time{} + r.lastErrorTime = time.Time{} +} + +func (r *retryWithDeadlineImpl) After(err error) { + if r.isRetryable(err) { + if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod { + r.reset() + } + + if r.firstErrorTime.IsZero() { + r.firstErrorTime = r.clock.Now() + } + r.lastErrorTime = r.clock.Now() + } +} + +func (r *retryWithDeadlineImpl) ShouldRetry() bool { + if r.maxRetryDuration <= time.Duration(0) { + return false + } + + if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration { + return true + } + + r.reset() + return false +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 4b7fc04e3..c805030bd 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -17,6 +17,7 @@ limitations under the License. package cache import ( + "errors" "fmt" "sync" "time" @@ -25,10 +26,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache/synctrack" "k8s.io/utils/buffer" "k8s.io/utils/clock" "k8s.io/klog/v2" + + clientgofeaturegate "k8s.io/client-go/features" ) // SharedInformer provides eventually consistent linkage of its @@ -131,10 +135,14 @@ import ( // state, except that its ResourceVersion is replaced with a // ResourceVersion in which the object is actually absent. type SharedInformer interface { - // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination - // between different handlers. - AddEventHandler(handler ResourceEventHandler) + // AddEventHandler adds an event handler to the shared informer using + // the shared informer's resync period. Events to a single handler are + // delivered sequentially, but there is no coordination between + // different handlers. + // It returns a registration handle for the handler that can be used to + // remove the handler again, or to tell if the handler is synced (has + // seen every item in the initial list). + AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) // AddEventHandlerWithResyncPeriod adds an event handler to the // shared informer with the requested resync period; zero means // this handler does not care about resyncs. The resync operation @@ -149,7 +157,13 @@ type SharedInformer interface { // between any two resyncs may be longer than the nominal period // because the implementation takes time to do work and there may // be competing load and scheduling noise. 
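The new retry_with_deadline.go helper vendored above is exported from the cache package; a rough usage sketch, with illustrative durations, of retrying a watch on transient internal errors:

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/tools/cache"
	"k8s.io/utils/clock"
)

// watchWithRetry keeps retrying transient internal errors for up to 30s,
// resetting the budget once a full minute passes without such an error.
func watchWithRetry(doWatch func() error) error {
	retry := cache.NewRetryWithDeadline(30*time.Second, time.Minute, apierrors.IsInternalError, clock.RealClock{})
	for {
		err := doWatch()
		retry.After(err)
		if err == nil || !retry.ShouldRetry() {
			return err
		}
	}
}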
- AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) + // It returns a registration handle for the handler that can be used to remove + // the handler again and an error if the handler cannot be added. + AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) + // RemoveEventHandler removes a formerly added event handler given by + // its registration handle. + // This function is guaranteed to be idempotent, and thread-safe. + RemoveEventHandler(handle ResourceEventHandlerRegistration) error // GetStore returns the informer's local cache as a Store. GetStore() Store // GetController is deprecated, it does nothing useful @@ -160,6 +174,10 @@ type SharedInformer interface { // HasSynced returns true if the shared informer's store has been // informed by at least one full LIST of the authoritative state // of the informer's object collection. This is unrelated to "resync". + // + // Note that this doesn't tell you if an individual handler is synced!! + // For that, please call HasSynced on the handle returned by + // AddEventHandler. HasSynced() bool // LastSyncResourceVersion is the resource version observed when last synced with the underlying // store. The value returned is not synchronized with access to the underlying store and is not @@ -180,6 +198,34 @@ type SharedInformer interface { // The handler should return quickly - any expensive processing should be // offloaded. SetWatchErrorHandler(handler WatchErrorHandler) error + + // The TransformFunc is called for each object which is about to be stored. + // + // This function is intended for you to take the opportunity to + // remove, transform, or normalize fields. One use case is to strip unused + // metadata fields out of objects to save on RAM cost. + // + // Must be set before starting the informer. + // + // Please see the comment on TransformFunc for more details. + SetTransform(handler TransformFunc) error + + // IsStopped reports whether the informer has already been stopped. + // Adding event handlers to already stopped informers is not possible. + // An informer already stopped will never be started again. + IsStopped() bool +} + +// Opaque interface representing the registration of ResourceEventHandler for +// a SharedInformer. Must be supplied back to the same SharedInformer's +// `RemoveEventHandler` to unregister the handlers. +// +// Also used to tell if the handler is synced (has had all items in the initial +// list delivered). +type ResourceEventHandlerRegistration interface { + // HasSynced reports if both the parent has synced and all pre-sync + // events have been delivered. + HasSynced() bool } // SharedIndexInformer provides add and get Indexers ability based on SharedInformer. @@ -190,14 +236,26 @@ type SharedIndexInformer interface { GetIndexer() Indexer } -// NewSharedInformer creates a new instance for the listwatcher. +// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details. func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer { return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{}) } -// NewSharedIndexInformer creates a new instance for the listwatcher. -// The created informer will not do resyncs if the given -// defaultEventHandlerResyncPeriod is zero. 
Otherwise: for each +// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See +// NewSharedIndexInformerWithOptions for full details. +func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + return NewSharedIndexInformerWithOptions( + lw, + exampleObject, + SharedIndexInformerOptions{ + ResyncPeriod: defaultEventHandlerResyncPeriod, + Indexers: indexers, + }, + ) +} + +// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher. +// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each // handler that with a non-zero requested resync period, whether added // before or after the informer starts, the nominal resync period is // the requested resync period rounded up to a multiple of the @@ -205,21 +263,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv // checking period is established when the informer starts running, // and is the maximum of (a) the minimum of the resync periods // requested before the informer starts and the -// defaultEventHandlerResyncPeriod given here and (b) the constant +// options.ResyncPeriod given here and (b) the constant // `minimumResyncPeriod` defined in this file. -func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { +func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer { realClock := &clock.RealClock{} - sharedIndexInformer := &sharedIndexInformer{ + + return &sharedIndexInformer{ + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers), processor: &sharedProcessor{clock: realClock}, - indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), listerWatcher: lw, objectType: exampleObject, - resyncCheckPeriod: defaultEventHandlerResyncPeriod, - defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, - cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), + objectDescription: options.ObjectDescription, + resyncCheckPeriod: options.ResyncPeriod, + defaultEventHandlerResyncPeriod: options.ResyncPeriod, clock: realClock, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)), } - return sharedIndexInformer +} + +// SharedIndexInformerOptions configures a sharedIndexInformer. +type SharedIndexInformerOptions struct { + // ResyncPeriod is the default event handler resync period and resync check + // period. If unset/unspecified, these are defaulted to 0 (do not resync). + ResyncPeriod time.Duration + + // Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured. + Indexers Indexers + + // ObjectDescription is the sharedIndexInformer's object description. This is passed through to the + // underlying Reflector's type description. + ObjectDescription string } // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. 
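Putting the new constructor and SetTransform together, a sketch of building an informer for unstructured objects with the options struct introduced here (names, resync period, and the transform are illustrative):

import (
	"time"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/tools/cache"
)

// newInformer builds a shared index informer via SharedIndexInformerOptions
// and strips managedFields before objects reach the store.
func newInformer(lw cache.ListerWatcher) (cache.SharedIndexInformer, error) {
	inf := cache.NewSharedIndexInformerWithOptions(
		lw,
		&unstructured.Unstructured{},
		cache.SharedIndexInformerOptions{
			ResyncPeriod:      60 * time.Second,
			Indexers:          cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
			ObjectDescription: "replicated resources", // surfaces in reflector log lines
		},
	)
	// Must be called before Run; the transform runs on every object before it
	// is stored, so dropping managedFields here shrinks the cache.
	err := inf.SetTransform(func(obj interface{}) (interface{}, error) {
		if acc, accErr := meta.Accessor(obj); accErr == nil {
			acc.SetManagedFields(nil)
		}
		return obj, nil
	})
	return inf, err
}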
@@ -244,7 +317,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheS return false } - klog.Infof("Caches are synced for %s ", controllerName) + klog.Infof("Caches are synced for %s", controllerName) return true } @@ -263,11 +336,9 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { - klog.V(2).Infof("stop requested") return false } - klog.V(4).Infof("caches populated") return true } @@ -293,12 +364,13 @@ type sharedIndexInformer struct { listerWatcher ListerWatcher - // objectType is an example object of the type this informer is - // expected to handle. Only the type needs to be right, except - // that when that is `unstructured.Unstructured` the object's - // `"apiVersion"` and `"kind"` must also be right. + // objectType is an example object of the type this informer is expected to handle. If set, an event + // with an object with a mismatching type is dropped instead of being delivered to listeners. objectType runtime.Object + // objectDescription is the description of this informer's objects. This typically defaults to + objectDescription string + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call // shouldResync to check if any of our listeners need a resync. resyncCheckPeriod time.Duration @@ -318,6 +390,8 @@ type sharedIndexInformer struct { // Called whenever the ListAndWatch drops the connection with an error. watchErrorHandler WatchErrorHandler + + transform TransformFunc } // dummyController hides the fact that a SharedInformer is different from a dedicated one @@ -337,6 +411,10 @@ func (v *dummyController) HasSynced() bool { } func (v *dummyController) LastSyncResourceVersion() string { + if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InformerResourceVersion) { + return v.informer.LastSyncResourceVersion() + } + return "" } @@ -346,7 +424,8 @@ type updateNotification struct { } type addNotification struct { - newObj interface{} + newObj interface{} + isInInitialList bool } type deleteNotification struct { @@ -365,6 +444,18 @@ func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) er return nil } +func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + s.transform = handler + return nil +} + func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() @@ -372,27 +463,30 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") return } - fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ - KnownObjects: s.indexer, - EmitDeltaTypeReplaced: true, - }) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.resyncCheckPeriod, - RetryOnError: false, - ShouldResync: s.processor.shouldResync, - - Process: s.HandleDeltas, - WatchErrorHandler: s.watchErrorHandler, - } func() { s.startedLock.Lock() defer s.startedLock.Unlock() + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: s.indexer, + EmitDeltaTypeReplaced: true, + Transformer: s.transform, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + ObjectDescription: s.objectDescription, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: 
s.processor.shouldResync, + + Process: s.HandleDeltas, + WatchErrorHandler: s.watchErrorHandler, + } + s.controller = New(cfg) s.controller.(*controller).clock = s.clock s.started = true @@ -452,8 +546,8 @@ func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { s.startedLock.Lock() defer s.startedLock.Unlock() - if s.started { - return fmt.Errorf("informer has already started") + if s.stopped { + return fmt.Errorf("indexer was not added because it has stopped already") } return s.indexer.AddIndexers(indexers) @@ -463,8 +557,8 @@ func (s *sharedIndexInformer) GetController() Controller { return &dummyController{informer: s} } -func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) { - s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) { + return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) } func determineResyncPeriod(desired, check time.Duration) time.Duration { @@ -484,13 +578,12 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration { const minimumResyncPeriod = 1 * time.Second -func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) { +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) { s.startedLock.Lock() defer s.startedLock.Unlock() if s.stopped { - klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) - return + return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler) } if resyncPeriod > 0 { @@ -513,11 +606,10 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced) if !s.started { - s.processor.addListener(listener) - return + return s.processor.addListener(listener), nil } // in order to safely join, we have to @@ -528,55 +620,86 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv s.blockDeltas.Lock() defer s.blockDeltas.Unlock() - s.processor.addListener(listener) + handle := s.processor.addListener(listener) for _, item := range s.indexer.List() { - listener.add(addNotification{newObj: item}) + // Note that we enqueue these notifications with the lock held + // and before returning the handle. That means there is never a + // chance for anyone to call the handle's HasSynced method in a + // state when it would falsely return true (i.e., when the + // shared informer is synced but it has not observed an Add + // with isInitialList being true, nor when the thread + // processing notifications somehow goes faster than this + // thread adding them and the counter is temporarily zero). 
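The registration handle returned above gives each handler its own sync state, independent of the informer-wide HasSynced, and allows later removal. A sketch, with the handler body and function names illustrative:

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// addHandler registers a handler that ignores replays of the initial list,
// waits for that handler's own sync, and returns a cleanup func.
func addHandler(inf cache.SharedIndexInformer, stopCh <-chan struct{}, onNew func(obj interface{})) (func(), error) {
	reg, err := inf.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			if isInInitialList {
				return // delivered as part of the first LIST replay
			}
			onNew(obj)
		},
	})
	if err != nil {
		return nil, err
	}
	// reg.HasSynced only turns true once the informer has synced AND this
	// handler has been delivered every item from the initial list.
	if !cache.WaitForCacheSync(stopCh, reg.HasSynced) {
		_ = inf.RemoveEventHandler(reg)
		return nil, fmt.Errorf("handler never synced")
	}
	return func() { _ = inf.RemoveEventHandler(reg) }, nil
}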
+ listener.add(addNotification{newObj: item, isInInitialList: true}) } + return handle, nil } -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { +func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error { s.blockDeltas.Lock() defer s.blockDeltas.Unlock() - // from oldest to newest - for _, d := range obj.(Deltas) { - switch d.Type { - case Sync, Replaced, Added, Updated: - s.cacheMutationDetector.AddObject(d.Object) - if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { - if err := s.indexer.Update(d.Object); err != nil { - return err - } + if deltas, ok := obj.(Deltas); ok { + return processDeltas(s, s.indexer, deltas, isInInitialList) + } + return errors.New("object given as Process argument is not Deltas") +} - isSync := false - switch { - case d.Type == Sync: - // Sync events are only propagated to listeners that requested resync - isSync = true - case d.Type == Replaced: - if accessor, err := meta.Accessor(d.Object); err == nil { - if oldAccessor, err := meta.Accessor(old); err == nil { - // Replaced events that didn't change resourceVersion are treated as resync events - // and only propagated to listeners that requested resync - isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion() - } - } - } - s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync) - } else { - if err := s.indexer.Add(d.Object); err != nil { - return err - } - s.processor.distribute(addNotification{newObj: d.Object}, false) - } - case Deleted: - if err := s.indexer.Delete(d.Object); err != nil { - return err - } - s.processor.distribute(deleteNotification{oldObj: d.Object}, false) +// Conforms to ResourceEventHandler +func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) { + // Invocation of this function is locked under s.blockDeltas, so it is + // save to distribute the notification + s.cacheMutationDetector.AddObject(obj) + s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false) +} + +// Conforms to ResourceEventHandler +func (s *sharedIndexInformer) OnUpdate(old, new interface{}) { + isSync := false + + // If is a Sync event, isSync should be true + // If is a Replaced event, isSync is true if resource version is unchanged. 
+ // If RV is unchanged: this is a Sync/Replaced event, so isSync is true + + if accessor, err := meta.Accessor(new); err == nil { + if oldAccessor, err := meta.Accessor(old); err == nil { + // Events that didn't change resourceVersion are treated as resync events + // and only propagated to listeners that requested resync + isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion() } } - return nil + + // Invocation of this function is locked under s.blockDeltas, so it is + // save to distribute the notification + s.cacheMutationDetector.AddObject(new) + s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync) +} + +// Conforms to ResourceEventHandler +func (s *sharedIndexInformer) OnDelete(old interface{}) { + // Invocation of this function is locked under s.blockDeltas, so it is + // save to distribute the notification + s.processor.distribute(deleteNotification{oldObj: old}, false) +} + +// IsStopped reports whether the informer has already been stopped +func (s *sharedIndexInformer) IsStopped() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.stopped +} + +func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + // in order to safely remove, we have to + // 1. stop sending add/update/delete notifications + // 2. remove and stop listener + // 3. unblock + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + return s.processor.removeListener(handle) } // sharedProcessor has a collection of processorListener and can @@ -588,39 +711,85 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { type sharedProcessor struct { listenersStarted bool listenersLock sync.RWMutex - listeners []*processorListener - syncingListeners []*processorListener - clock clock.Clock - wg wait.Group + // Map from listeners to whether or not they are currently syncing + listeners map[*processorListener]bool + clock clock.Clock + wg wait.Group +} + +func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + if p.listeners == nil { + return nil + } + + if result, ok := registration.(*processorListener); ok { + if _, exists := p.listeners[result]; exists { + return result + } + } + + return nil } -func (p *sharedProcessor) addListener(listener *processorListener) { +func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration { p.listenersLock.Lock() defer p.listenersLock.Unlock() - p.addListenerLocked(listener) + if p.listeners == nil { + p.listeners = make(map[*processorListener]bool) + } + + p.listeners[listener] = true + if p.listenersStarted { p.wg.Start(listener.run) p.wg.Start(listener.pop) } + + return listener } -func (p *sharedProcessor) addListenerLocked(listener *processorListener) { - p.listeners = append(p.listeners, listener) - p.syncingListeners = append(p.syncingListeners, listener) +func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + listener, ok := handle.(*processorListener) + if !ok { + return fmt.Errorf("invalid key type %t", handle) + } else if p.listeners == nil { + // No listeners are registered, do nothing + return nil + } else if _, exists := p.listeners[listener]; !exists { + // Listener is not registered, just do nothing + return nil + } + + delete(p.listeners, listener) + 
+ if p.listenersStarted { + close(listener.addCh) + } + + return nil } func (p *sharedProcessor) distribute(obj interface{}, sync bool) { p.listenersLock.RLock() defer p.listenersLock.RUnlock() - if sync { - for _, listener := range p.syncingListeners { + for listener, isSyncing := range p.listeners { + switch { + case !sync: + // non-sync messages are delivered to every listener listener.add(obj) - } - } else { - for _, listener := range p.listeners { + case isSyncing: + // sync messages are delivered to every syncing listener listener.add(obj) + default: + // skipping a sync obj for a non-syncing listener } } } @@ -629,18 +798,27 @@ func (p *sharedProcessor) run(stopCh <-chan struct{}) { func() { p.listenersLock.RLock() defer p.listenersLock.RUnlock() - for _, listener := range p.listeners { + for listener := range p.listeners { p.wg.Start(listener.run) p.wg.Start(listener.pop) } p.listenersStarted = true }() <-stopCh - p.listenersLock.RLock() - defer p.listenersLock.RUnlock() - for _, listener := range p.listeners { + + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + for listener := range p.listeners { close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop } + + // Wipe out list of listeners since they are now closed + // (processorListener cannot be re-used) + p.listeners = nil + + // Reset to false since no listeners are running + p.listenersStarted = false + p.wg.Wait() // Wait for all .pop() and .run() to stop } @@ -650,16 +828,16 @@ func (p *sharedProcessor) shouldResync() bool { p.listenersLock.Lock() defer p.listenersLock.Unlock() - p.syncingListeners = []*processorListener{} - resyncNeeded := false now := p.clock.Now() - for _, listener := range p.listeners { + for listener := range p.listeners { // need to loop through all the listeners to see if they need to resync so we can prepare any // listeners that are going to be resyncing. - if listener.shouldResync(now) { + shouldResync := listener.shouldResync(now) + p.listeners[listener] = shouldResync + + if shouldResync { resyncNeeded = true - p.syncingListeners = append(p.syncingListeners, listener) listener.determineNextResync(now) } } @@ -670,8 +848,9 @@ func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Durati p.listenersLock.RLock() defer p.listenersLock.RUnlock() - for _, listener := range p.listeners { - resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod) + for listener := range p.listeners { + resyncPeriod := determineResyncPeriod( + listener.requestedResyncPeriod, resyncCheckPeriod) listener.setResyncPeriod(resyncPeriod) } } @@ -693,6 +872,8 @@ type processorListener struct { handler ResourceEventHandler + syncTracker *synctrack.SingleFileTracker + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications // added until we OOM. @@ -723,11 +904,18 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { +// HasSynced returns true if the source informer has synced, and all +// corresponding events have been delivered. 
+func (p *processorListener) HasSynced() bool { + return p.syncTracker.HasSynced() +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener { ret := &processorListener{ nextCh: make(chan interface{}), addCh: make(chan interface{}), handler: handler, + syncTracker: &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced}, pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, @@ -739,6 +927,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res } func (p *processorListener) add(notification interface{}) { + if a, ok := notification.(addNotification); ok && a.isInInitialList { + p.syncTracker.Start() + } p.addCh <- notification } @@ -784,7 +975,10 @@ func (p *processorListener) run() { case updateNotification: p.handler.OnUpdate(notification.oldObj, notification.newObj) case addNotification: - p.handler.OnAdd(notification.newObj) + p.handler.OnAdd(notification.newObj, notification.isInInitialList) + if notification.isInInitialList { + p.syncTracker.Finished() + } case deleteNotification: p.handler.OnDelete(notification.oldObj) default: diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go index 24ffabab7..5cc3f42ec 100644 --- a/vendor/k8s.io/client-go/tools/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -21,6 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Store is a generic object storage and processing interface. A @@ -99,20 +100,38 @@ type ExplicitKey string // The key uses the format <namespace>/<name> unless <namespace> is empty, then // it's just <name>. // -// TODO: replace key-as-string with a key-as-struct so that this -// packing/unpacking won't be necessary. +// Clients that want a structured alternative can use ObjectToName or MetaObjectToName. +// Note: this would not be a client that wants a key for a Store because those are +// necessarily strings. +// +// TODO maybe some day?: change Store to be keyed differently func MetaNamespaceKeyFunc(obj interface{}) (string, error) { if key, ok := obj.(ExplicitKey); ok { return string(key), nil } + objName, err := ObjectToName(obj) + if err != nil { + return "", err + } + return objName.String(), nil +} + +// ObjectToName returns the structured name for the given object, +// if indeed it can be viewed as a metav1.Object.
+func ObjectToName(obj interface{}) (ObjectName, error) { meta, err := meta.Accessor(obj) if err != nil { - return "", fmt.Errorf("object has no meta: %v", err) + return ObjectName{}, fmt.Errorf("object has no meta: %v", err) } - if len(meta.GetNamespace()) > 0 { - return meta.GetNamespace() + "/" + meta.GetName(), nil + return MetaObjectToName(meta), nil +} + +// MetaObjectToName returns the structured name for the given object +func MetaObjectToName(obj metav1.Object) ObjectName { + if len(obj.GetNamespace()) > 0 { + return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()} } - return meta.GetName(), nil + return ObjectName{Namespace: "", Name: obj.GetName()} } // SplitMetaNamespaceKey returns the namespace and name that @@ -199,8 +218,11 @@ func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) return c.cacheStorage.Index(indexName, obj) } -func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) { - return c.cacheStorage.IndexKeys(indexName, indexKey) +// IndexKeys returns the storage keys of the stored objects whose set of +// indexed values for the named index includes the given indexed value. +// The returned keys are suitable to pass to GetByKey(). +func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) { + return c.cacheStorage.IndexKeys(indexName, indexedValue) } // ListIndexFuncValues returns the list of generated values of an Index func @@ -208,8 +230,10 @@ func (c *cache) ListIndexFuncValues(indexName string) []string { return c.cacheStorage.ListIndexFuncValues(indexName) } -func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) { - return c.cacheStorage.ByIndex(indexName, indexKey) +// ByIndex returns the stored objects whose set of indexed values +// for the named index includes the given indexed value. +func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + return c.cacheStorage.ByIndex(indexName, indexedValue) } func (c *cache) AddIndexers(newIndexers Indexers) error { diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go new file mode 100644 index 000000000..ce51da9af --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package synctrack + +import ( + "sync" + "sync/atomic" +) + +// Lazy defers the computation of `Evaluate` to when it is necessary. It is +// possible that Evaluate will be called in parallel from multiple goroutines. 
+type Lazy[T any] struct { + Evaluate func() (T, error) + + cache atomic.Pointer[cacheEntry[T]] +} + +type cacheEntry[T any] struct { + eval func() (T, error) + lock sync.RWMutex + result *T +} + +func (e *cacheEntry[T]) get() (T, error) { + if cur := func() *T { + e.lock.RLock() + defer e.lock.RUnlock() + return e.result + }(); cur != nil { + return *cur, nil + } + + e.lock.Lock() + defer e.lock.Unlock() + if e.result != nil { + return *e.result, nil + } + r, err := e.eval() + if err == nil { + e.result = &r + } + return r, err +} + +func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] { + return &cacheEntry[T]{eval: z.Evaluate} +} + +// Notify should be called when something has changed necessitating a new call +// to Evaluate. +func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) } + +// Get should be called to get the current result of a call to Evaluate. If the +// current cached value is stale (due to a call to Notify), then Evaluate will +// be called synchronously. If subsequent calls to Get happen (without another +// Notify), they will all wait for the same return value. +// +// Error returns are not cached and will cause multiple calls to evaluate! +func (z *Lazy[T]) Get() (T, error) { + e := z.cache.Load() + if e == nil { + // Since we don't force a constructor, nil is a possible value. + // If multiple Gets race to set this, the swap makes sure only + // one wins. + z.cache.CompareAndSwap(nil, z.newCacheEntry()) + e = z.cache.Load() + } + return e.get() +} diff --git a/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go new file mode 100644 index 000000000..3fa2beb6b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go @@ -0,0 +1,120 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package synctrack contains utilities for helping controllers track whether +// they are "synced" or not, that is, whether they have processed all items +// from the informer's initial list. +package synctrack + +import ( + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// AsyncTracker helps propagate HasSynced in the face of multiple worker threads. +type AsyncTracker[T comparable] struct { + UpstreamHasSynced func() bool + + lock sync.Mutex + waiting sets.Set[T] +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *AsyncTracker[T]) Start(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting == nil { + t.waiting = sets.New[T](key) + } else { + t.waiting.Insert(key) + } +} + +// Finished should be called when finished processing a key which was part of +// the initial list. Since keys are tracked individually, nothing bad happens +// if you call Finished without a corresponding call to Start. This makes it +// easier to use this in combination with e.g. queues which don't make it easy +// to plumb through the isInInitialList boolean. 
+func (t *AsyncTracker[T]) Finished(key T) { + t.lock.Lock() + defer t.lock.Unlock() + if t.waiting != nil { + t.waiting.Delete(key) + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *AsyncTracker[T]) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we can't hold our lock while + // waiting on that or a user is likely to get a deadlock. + if !t.UpstreamHasSynced() { + return false + } + t.lock.Lock() + defer t.lock.Unlock() + return t.waiting.Len() == 0 +} + +// SingleFileTracker helps propagate HasSynced when events are processed in +// order (i.e. via a queue). +type SingleFileTracker struct { + // Important: count is used with atomic operations so it must be 64-bit + // aligned, otherwise atomic operations will panic. Having it at the top of + // the struct will guarantee that, even on 32-bit arches. + // See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information. + count int64 + + UpstreamHasSynced func() bool +} + +// Start should be called prior to processing each key which is part of the +// initial list. +func (t *SingleFileTracker) Start() { + atomic.AddInt64(&t.count, 1) +} + +// Finished should be called when finished processing a key which was part of +// the initial list. You must never call Finished() before (or without) its +// corresponding Start(), that is a logic error that could cause HasSynced to +// return a wrong value. To help you notice this should it happen, Finished() +// will panic if the internal counter goes negative. +func (t *SingleFileTracker) Finished() { + result := atomic.AddInt64(&t.count, -1) + if result < 0 { + panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value") + } +} + +// HasSynced returns true if the source is synced and every key present in the +// initial list has been processed. This relies on the source not considering +// itself synced until *after* it has delivered the notification for the last +// key, and that notification handler must have called Start. +func (t *SingleFileTracker) HasSynced() bool { + // Call UpstreamHasSynced first: it might take a lock, which might take + // a significant amount of time, and we don't want to then act on a + // stale count value. + if !t.UpstreamHasSynced() { + return false + } + return atomic.LoadInt64(&t.count) <= 0 +} diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 71d909d49..7a4df0e1b 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -47,35 +47,190 @@ type ThreadSafeStore interface { ListKeys() []string Replace(map[string]interface{}, string) Index(indexName string, obj interface{}) ([]interface{}, error) - IndexKeys(indexName, indexKey string) ([]string, error) + IndexKeys(indexName, indexedValue string) ([]string, error) ListIndexFuncValues(name string) []string - ByIndex(indexName, indexKey string) ([]interface{}, error) + ByIndex(indexName, indexedValue string) ([]interface{}, error) GetIndexers() Indexers - // AddIndexers adds more indexers to this store. 
If you call this after you already have data - // in the store, the results are undefined. + // AddIndexers adds more indexers to this store. This supports adding indexes after the store already has items. AddIndexers(newIndexers Indexers) error // Resync is a no-op and is deprecated Resync() error } +// storeIndex implements the indexing functionality for Store interface +type storeIndex struct { + // indexers maps a name to an IndexFunc + indexers Indexers + // indices maps a name to an Index + indices Indices +} + +func (i *storeIndex) reset() { + i.indices = Indices{} +} + +func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexedValues, err := indexFunc(obj) + if err != nil { + return nil, err + } + index := i.indices[indexName] + + var storeKeySet sets.String + if len(indexedValues) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + storeKeySet = index[indexedValues[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. + storeKeySet = sets.String{} + for _, indexedValue := range indexedValues { + for key := range index[indexedValue] { + storeKeySet.Insert(key) + } + } + } + + return storeKeySet, nil +} + +func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := i.indices[indexName] + return index[indexedValue], nil +} + +func (i *storeIndex) getIndexValues(indexName string) []string { + index := i.indices[indexName] + names := make([]string, 0, len(index)) + for key := range index { + names = append(names, key) + } + return names +} + +func (i *storeIndex) addIndexers(newIndexers Indexers) error { + oldKeys := sets.StringKeySet(i.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) { + return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) + } + + for k, v := range newIndexers { + i.indexers[k] = v + } + return nil +} + +// updateSingleIndex modifies the objects location in the named index: +// - for create you must provide only the newObj +// - for update you must provide both the oldObj and the newObj +// - for delete you must provide only the oldObj +// updateSingleIndex must be called from a function that already has a lock on the cache +func (i *storeIndex) updateSingleIndex(name string, oldObj interface{}, newObj interface{}, key string) { + var oldIndexValues, indexValues []string + indexFunc, ok := i.indexers[name] + if !ok { + // Should never happen. Caller is responsible for ensuring this exists, and should call with lock + // held to avoid any races. 
+ panic(fmt.Errorf("indexer %q does not exist", name)) + } + if oldObj != nil { + var err error + oldIndexValues, err = indexFunc(oldObj) + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + } else { + oldIndexValues = oldIndexValues[:0] + } + + if newObj != nil { + var err error + indexValues, err = indexFunc(newObj) + if err != nil { + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) + } + } else { + indexValues = indexValues[:0] + } + + index := i.indices[name] + if index == nil { + index = Index{} + i.indices[name] = index + } + + if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { + // We optimize for the most common case where indexFunc returns a single value which has not been changed + return + } + + for _, value := range oldIndexValues { + i.deleteKeyFromIndex(key, value, index) + } + for _, value := range indexValues { + i.addKeyToIndex(key, value, index) + } +} + +// updateIndices modifies the objects location in the managed indexes: +// - for create you must provide only the newObj +// - for update you must provide both the oldObj and the newObj +// - for delete you must provide only the oldObj +// updateIndices must be called from a function that already has a lock on the cache +func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) { + for name := range i.indexers { + i.updateSingleIndex(name, oldObj, newObj, key) + } +} + +func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + set = sets.String{} + index[indexValue] = set + } + set.Insert(key) +} + +func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) { + set := index[indexValue] + if set == nil { + return + } + set.Delete(key) + // If we don't delete the set when zero, indices with high cardinality + // short lived resources can cause memory to increase over time from + // unused empty sets. See `kubernetes/kubernetes/issues/84959`. 
+ if len(set) == 0 { + delete(index, indexValue) + } +} + // threadSafeMap implements ThreadSafeStore type threadSafeMap struct { lock sync.RWMutex items map[string]interface{} - // indexers maps a name to an IndexFunc - indexers Indexers - // indices maps a name to an Index - indices Indices + // index implements the indexing functionality + index *storeIndex } func (c *threadSafeMap) Add(key string, obj interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - oldObject := c.items[key] - c.items[key] = obj - c.updateIndices(oldObject, obj, key) + c.Update(key, obj) } func (c *threadSafeMap) Update(key string, obj interface{}) { @@ -83,14 +238,14 @@ func (c *threadSafeMap) Update(key string, obj interface{}) { defer c.lock.Unlock() oldObject := c.items[key] c.items[key] = obj - c.updateIndices(oldObject, obj, key) + c.index.updateIndices(oldObject, obj, key) } func (c *threadSafeMap) Delete(key string) { c.lock.Lock() defer c.lock.Unlock() if obj, exists := c.items[key]; exists { - c.updateIndices(obj, nil, key) + c.index.updateIndices(obj, nil, key) delete(c.items, key) } } @@ -130,9 +285,9 @@ func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion st c.items = items // rebuild any index - c.indices = Indices{} + c.index.reset() for key, item := range c.items { - c.updateIndices(nil, item, key) + c.index.updateIndices(nil, item, key) } } @@ -142,32 +297,10 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, c.lock.RLock() defer c.lock.RUnlock() - indexFunc := c.indexers[indexName] - if indexFunc == nil { - return nil, fmt.Errorf("Index with name %s does not exist", indexName) - } - - indexedValues, err := indexFunc(obj) + storeKeySet, err := c.index.getKeysFromIndex(indexName, obj) if err != nil { return nil, err } - index := c.indices[indexName] - - var storeKeySet sets.String - if len(indexedValues) == 1 { - // In majority of cases, there is exactly one value matching. - // Optimize the most common path - deduping is not needed here. - storeKeySet = index[indexedValues[0]] - } else { - // Need to de-dupe the return list. - // Since multiple keys are allowed, this can happen. 
- storeKeySet = sets.String{} - for _, indexedValue := range indexedValues { - for key := range index[indexedValue] { - storeKeySet.Insert(key) - } - } - } list := make([]interface{}, 0, storeKeySet.Len()) for storeKey := range storeKeySet { @@ -181,14 +314,10 @@ func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, c.lock.RLock() defer c.lock.RUnlock() - indexFunc := c.indexers[indexName] - if indexFunc == nil { - return nil, fmt.Errorf("Index with name %s does not exist", indexName) + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err } - - index := c.indices[indexName] - - set := index[indexedValue] list := make([]interface{}, 0, set.Len()) for key := range set { list = append(list, c.items[key]) @@ -203,14 +332,10 @@ func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, err c.lock.RLock() defer c.lock.RUnlock() - indexFunc := c.indexers[indexName] - if indexFunc == nil { - return nil, fmt.Errorf("Index with name %s does not exist", indexName) + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err } - - index := c.indices[indexName] - - set := index[indexedValue] return set.List(), nil } @@ -218,107 +343,29 @@ func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { c.lock.RLock() defer c.lock.RUnlock() - index := c.indices[indexName] - names := make([]string, 0, len(index)) - for key := range index { - names = append(names, key) - } - return names + return c.index.getIndexValues(indexName) } func (c *threadSafeMap) GetIndexers() Indexers { - return c.indexers + return c.index.indexers } func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { c.lock.Lock() defer c.lock.Unlock() - if len(c.items) > 0 { - return fmt.Errorf("cannot add indexers to running index") + if err := c.index.addIndexers(newIndexers); err != nil { + return err } - oldKeys := sets.StringKeySet(c.indexers) - newKeys := sets.StringKeySet(newIndexers) - - if oldKeys.HasAny(newKeys.List()...) 
{ - return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys)) - } - - for k, v := range newIndexers { - c.indexers[k] = v - } - return nil -} - -// updateIndices modifies the objects location in the managed indexes: -// - for create you must provide only the newObj -// - for update you must provide both the oldObj and the newObj -// - for delete you must provide only the oldObj -// updateIndices must be called from a function that already has a lock on the cache -func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) { - var oldIndexValues, indexValues []string - var err error - for name, indexFunc := range c.indexers { - if oldObj != nil { - oldIndexValues, err = indexFunc(oldObj) - } else { - oldIndexValues = oldIndexValues[:0] - } - if err != nil { - panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) - } - - if newObj != nil { - indexValues, err = indexFunc(newObj) - } else { - indexValues = indexValues[:0] - } - if err != nil { - panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) - } - - index := c.indices[name] - if index == nil { - index = Index{} - c.indices[name] = index - } - - if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] { - // We optimize for the most common case where indexFunc returns a single value which has not been changed - continue - } - - for _, value := range oldIndexValues { - c.deleteKeyFromIndex(key, value, index) - } - for _, value := range indexValues { - c.addKeyToIndex(key, value, index) + // If there are already items, index them + for key, item := range c.items { + for name := range newIndexers { + c.index.updateSingleIndex(name, nil, item, key) } } -} - -func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) { - set := index[indexValue] - if set == nil { - set = sets.String{} - index[indexValue] = set - } - set.Insert(key) -} -func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) { - set := index[indexValue] - if set == nil { - return - } - set.Delete(key) - // If we don't delete the set when zero, indices with high cardinality - // short lived resources can cause memory to increase over time from - // unused empty sets. See `kubernetes/kubernetes/issues/84959`. - if len(set) == 0 { - delete(index, indexValue) - } + return nil } func (c *threadSafeMap) Resync() error { @@ -329,8 +376,10 @@ func (c *threadSafeMap) Resync() error { // NewThreadSafeStore creates a new instance of ThreadSafeStore. func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { return &threadSafeMap{ - items: map[string]interface{}{}, - indexers: indexers, - indices: indices, + items: map[string]interface{}{}, + index: &storeIndex{ + indexers: indexers, + indices: indices, + }, } } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 5871575a6..fd913a308 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// +k8s:deepcopy-gen=package -package api +package api // import "k8s.io/client-go/tools/clientcmd/api" diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go index d677d6519..261dcacb5 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -20,10 +20,10 @@ import ( "encoding/base64" "errors" "fmt" - "io/ioutil" "os" - "path" "path/filepath" + "reflect" + "strings" ) func init() { @@ -82,21 +82,21 @@ func MinifyConfig(config *Config) error { } var ( - redactedBytes []byte dataOmittedBytes []byte + redactedBytes []byte ) -// Flatten redacts raw data entries from the config object for a human-readable view. +// ShortenConfig redacts raw data entries from the config object for a human-readable view. func ShortenConfig(config *Config) { - // trick json encoder into printing a human readable string in the raw data + // trick json encoder into printing a human-readable string in the raw data // by base64 decoding what we want to print. Relies on implementation of // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte for key, authInfo := range config.AuthInfos { if len(authInfo.ClientKeyData) > 0 { - authInfo.ClientKeyData = redactedBytes + authInfo.ClientKeyData = dataOmittedBytes } if len(authInfo.ClientCertificateData) > 0 { - authInfo.ClientCertificateData = redactedBytes + authInfo.ClientCertificateData = dataOmittedBytes } if len(authInfo.Token) > 0 { authInfo.Token = "REDACTED" @@ -111,10 +111,10 @@ func ShortenConfig(config *Config) { } } -// Flatten changes the config object into a self contained config (useful for making secrets) +// FlattenConfig changes the config object into a self-contained config (useful for making secrets) func FlattenConfig(config *Config) error { for key, authInfo := range config.AuthInfos { - baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "") if err != nil { return err } @@ -129,7 +129,7 @@ func FlattenConfig(config *Config) error { config.AuthInfos[key] = authInfo } for key, cluster := range config.Clusters { - baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "") if err != nil { return err } @@ -152,7 +152,7 @@ func FlattenContent(path *string, contents *[]byte, baseDir string) error { var err error absPath := ResolvePath(*path, baseDir) - *contents, err = ioutil.ReadFile(absPath) + *contents, err = os.ReadFile(absPath) if err != nil { return err } @@ -189,3 +189,77 @@ func MakeAbs(path, base string) (string, error) { } return filepath.Join(base, path), nil } + +// RedactSecrets replaces any sensitive values with REDACTED +func RedactSecrets(config *Config) error { + return redactSecrets(reflect.ValueOf(config), false) +} + +func redactSecrets(curr reflect.Value, redact bool) error { + redactedBytes = []byte("REDACTED") + if !curr.IsValid() { + return nil + } + + actualCurrValue := curr + if curr.Kind() == reflect.Ptr { + actualCurrValue = curr.Elem() + } + + switch actualCurrValue.Kind() { + case reflect.Map: + for _, v := range actualCurrValue.MapKeys() { + err := redactSecrets(actualCurrValue.MapIndex(v), false) + if err != nil { + return err + } + } + return nil + + case reflect.String: + if redact { + if !actualCurrValue.IsZero() { + actualCurrValue.SetString("REDACTED") + } + } + return nil + + case reflect.Slice: + if 
actualCurrValue.Type() == reflect.TypeOf([]byte{}) && redact { + if !actualCurrValue.IsNil() { + actualCurrValue.SetBytes(redactedBytes) + } + return nil + } + for i := 0; i < actualCurrValue.Len(); i++ { + err := redactSecrets(actualCurrValue.Index(i), false) + if err != nil { + return err + } + } + return nil + + case reflect.Struct: + for fieldIndex := 0; fieldIndex < actualCurrValue.NumField(); fieldIndex++ { + currFieldValue := actualCurrValue.Field(fieldIndex) + currFieldType := actualCurrValue.Type().Field(fieldIndex) + currYamlTag := currFieldType.Tag.Get("datapolicy") + currFieldTypeYamlName := strings.Split(currYamlTag, ",")[0] + if currFieldTypeYamlName != "" { + err := redactSecrets(currFieldValue, true) + if err != nil { + return err + } + } else { + err := redactSecrets(currFieldValue, false) + if err != nil { + return err + } + } + } + return nil + + default: + return nil + } +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 44a2fb94b..ae8b8c703 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -67,7 +67,7 @@ type Preferences struct { type Cluster struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` // TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used. @@ -93,6 +93,11 @@ type Cluster struct { // attach, port forward). // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions map[string]runtime.Object `json:"extensions,omitempty"` @@ -102,7 +107,7 @@ type Cluster struct { type AuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // ClientCertificate is the path to a client cert file for TLS. // +optional ClientCertificate string `json:"client-certificate,omitempty"` @@ -154,7 +159,7 @@ type AuthInfo struct { type Context struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. // +k8s:conversion-gen=false - LocationOfOrigin string + LocationOfOrigin string `json:"-"` // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` // AuthInfo is the name of the authInfo for this context @@ -247,7 +252,7 @@ type ExecConfig struct { // recommended as one of the prime benefits of exec plugins is that no secrets need // to be stored directly in the kubeconfig. 
// +k8s:conversion-gen=false - Config runtime.Object + Config runtime.Object `json:"-"` // InteractiveMode determines this plugin's relationship with standard input. Valid // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this @@ -259,7 +264,7 @@ type ExecConfig struct { // client.authentication.k8s.io/v1beta1, then this field is optional and defaults // to "IfAvailable" when unset. Otherwise, this field is required. // +optional - InteractiveMode ExecInteractiveMode + InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"` // StdinUnavailable indicates whether the exec authenticator can pass standard // input through to this exec plugin. For example, a higher level entity might be using @@ -267,14 +272,14 @@ type ExecConfig struct { // plugin to use standard input. This is kept here in order to keep all of the exec configuration // together, but it is never serialized. // +k8s:conversion-gen=false - StdinUnavailable bool + StdinUnavailable bool `json:"-"` // StdinUnavailableMessage is an optional message to be displayed when the exec authenticator // cannot successfully run this exec plugin because it needs to use standard input and // StdinUnavailable is true. For example, a process that is already using standard input to // read user instructions might set this to "used by my-program to read user instructions". // +k8s:conversion-gen=false - StdinUnavailableMessage string + StdinUnavailableMessage string `json:"-"` } var _ fmt.Stringer = new(ExecConfig) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index 3ccdebc1c..9e483e9d7 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -18,4 +18,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:defaulter-gen=Kind -package v1 +package v1 // import "k8s.io/client-go/tools/clientcmd/api/v1" diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go index 757ed817b..5018a72b1 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -86,6 +86,11 @@ type Cluster struct { // attach, port forward). // +optional ProxyURL string `json:"proxy-url,omitempty"` + // DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful + // to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on + // compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296. + // +optional + DisableCompression bool `json:"disable-compression,omitempty"` // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields // +optional Extensions []NamedExtension `json:"extensions,omitempty"` diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go index a13bae64d..bdedc1666 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go @@ -257,6 +257,7 @@ func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conv out.CertificateAuthority = in.CertificateAuthority out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil { return err } @@ -276,6 +277,7 @@ func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conv out.CertificateAuthority = in.CertificateAuthority out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData)) out.ProxyURL = in.ProxyURL + out.DisableCompression = in.DisableCompression if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil { return err } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go index 5153a95a2..ce951e88d 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "golang.org/x/term" @@ -59,7 +58,7 @@ func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { if err != nil { return &auth, err } - err = ioutil.WriteFile(path, data, 0600) + err = os.WriteFile(path, data, 0600) return &auth, err } authPtr, err := clientauth.LoadFromFile(path) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index cc37c9fbf..952f6d7eb 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -19,7 +19,6 @@ package clientcmd import ( "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -73,6 +72,13 @@ type ClientConfig interface { ConfigAccess() ConfigAccess } +// OverridingClientConfig is used to enable overriding the raw KubeConfig +type OverridingClientConfig interface { + ClientConfig + // MergedRawConfig returns the RawConfig merged with all overrides.
+ MergedRawConfig() (clientcmdapi.Config, error) +} + type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister type promptedCredentials struct { @@ -92,22 +98,22 @@ type DirectClientConfig struct { } // NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name -func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { +func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) OverridingClientConfig { return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} } // NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { +func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) OverridingClientConfig { return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}} } // NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags -func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { +func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) OverridingClientConfig { return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}} } // NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig -func NewClientConfigFromBytes(configBytes []byte) (ClientConfig, error) { +func NewClientConfigFromBytes(configBytes []byte) (OverridingClientConfig, error) { config, err := Load(configBytes) if err != nil { return nil, err @@ -130,6 +136,40 @@ func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { return config.config, nil } +// MergedRawConfig returns the raw kube config merged with the overrides +func (config *DirectClientConfig) MergedRawConfig() (clientcmdapi.Config, error) { + if err := config.ConfirmUsable(); err != nil { + return clientcmdapi.Config{}, err + } + merged := config.config.DeepCopy() + + // set the AuthInfo merged with overrides in the merged config + mergedAuthInfo, err := config.getAuthInfo() + if err != nil { + return clientcmdapi.Config{}, err + } + mergedAuthInfoName, _ := config.getAuthInfoName() + merged.AuthInfos[mergedAuthInfoName] = &mergedAuthInfo + + // set the Context merged with overrides in the merged config + mergedContext, err := config.getContext() + if err != nil { + return clientcmdapi.Config{}, err + } + mergedContextName, _ := config.getContextName() + merged.Contexts[mergedContextName] = &mergedContext + merged.CurrentContext = mergedContextName + + // set the Cluster merged with overrides in the merged config + configClusterInfo, err := config.getCluster() + if err != nil { + return clientcmdapi.Config{}, err + } + configClusterName, _ := config.getClusterName() + merged.Clusters[configClusterName] = &configClusterInfo + return *merged, nil +} + // ClientConfig implements 
ClientConfig func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { // check that getAuthInfo, getContext, and getCluster do not return an error. @@ -165,6 +205,8 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { clientConfig.Proxy = http.ProxyURL(u) } + clientConfig.DisableCompression = configClusterInfo.DisableCompression + if config.overrides != nil && len(config.overrides.Timeout) > 0 { timeout, err := ParseTimeout(config.overrides.Timeout) if err != nil { @@ -246,7 +288,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI mergedConfig.BearerToken = configAuthInfo.Token mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile) if err != nil { return nil, err } @@ -586,7 +628,7 @@ func (config *inClusterClientConfig) Namespace() (string, bool, error) { } // Fall back to the namespace associated with the service account token, if available - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { if ns := strings.TrimSpace(string(data)); len(ns) > 0 { return ns, false, nil } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go index 31f896316..2cd213ccb 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -19,7 +19,6 @@ package clientcmd import ( "errors" "os" - "path" "path/filepath" "reflect" "sort" @@ -148,7 +147,7 @@ func NewDefaultPathOptions() *PathOptions { EnvVar: RecommendedConfigPathEnvVar, ExplicitFileFlag: RecommendedConfigPathFlag, - GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName), LoadingRules: NewDefaultClientConfigLoadingRules(), } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go index 4e301332d..b75737f1c 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -18,7 +18,6 @@ package clientcmd import ( "fmt" - "io/ioutil" "os" "path/filepath" "reflect" @@ -129,6 +128,28 @@ type ClientConfigLoadingRules struct { // WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not. // In case of missing files, it warns the user about the missing files. WarnIfAllMissing bool + + // Warner is the warning log callback to use in case of missing files. + Warner WarningHandler +} + +// WarningHandler allows to set the logging function to use +type WarningHandler func(error) + +func (handler WarningHandler) Warn(err error) { + if handler == nil { + klog.V(1).Info(err) + } else { + handler(err) + } +} + +type MissingConfigError struct { + Missing []string +} + +func (c MissingConfigError) Error() string { + return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", ")) } // ClientConfigLoadingRules implements the ClientConfigLoader interface. 
@@ -220,7 +241,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { } if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 { - klog.Warningf("Config not found: %s", strings.Join(missingList, ", ")) + rules.Warner.Warn(MissingConfigError{Missing: missingList}) } // first merge all of our maps @@ -283,12 +304,12 @@ func (rules *ClientConfigLoadingRules) Migrate() error { return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) } - data, err := ioutil.ReadFile(source) + data, err := os.ReadFile(source) if err != nil { return err } // destination is created with mode 0666 before umask - err = ioutil.WriteFile(destination, data, 0666) + err = os.WriteFile(destination, data, 0666) if err != nil { return err } @@ -363,7 +384,7 @@ func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) + kubeconfigBytes, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -429,7 +450,7 @@ func WriteToFile(config clientcmdapi.Config, filename string) error { } } - if err := ioutil.WriteFile(filename, content, 0600); err != nil { + if err := os.WriteFile(filename, content, 0600); err != nil { return err } return nil diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 10744156b..0fc2fd0a0 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -49,12 +49,12 @@ type InClusterConfig interface { Possible() bool } -// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name +// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} } -// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader +// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go index ff643cc13..483e51532 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -73,6 +73,8 @@ type ClusterOverrideFlags struct { CertificateAuthority FlagInfo InsecureSkipTLSVerify FlagInfo TLSServerName FlagInfo + ProxyURL FlagInfo + DisableCompression FlagInfo } // FlagInfo contains information about how to register a flag. 
This struct is useful if you want to provide a way for an extender to @@ -142,24 +144,26 @@ func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo { } const ( - FlagClusterName = "cluster" - FlagAuthInfoName = "user" - FlagContext = "context" - FlagNamespace = "namespace" - FlagAPIServer = "server" - FlagTLSServerName = "tls-server-name" - FlagInsecure = "insecure-skip-tls-verify" - FlagCertFile = "client-certificate" - FlagKeyFile = "client-key" - FlagCAFile = "certificate-authority" - FlagEmbedCerts = "embed-certs" - FlagBearerToken = "token" - FlagImpersonate = "as" - FlagImpersonateUID = "as-uid" - FlagImpersonateGroup = "as-group" - FlagUsername = "username" - FlagPassword = "password" - FlagTimeout = "request-timeout" + FlagClusterName = "cluster" + FlagAuthInfoName = "user" + FlagContext = "context" + FlagNamespace = "namespace" + FlagAPIServer = "server" + FlagTLSServerName = "tls-server-name" + FlagInsecure = "insecure-skip-tls-verify" + FlagCertFile = "client-certificate" + FlagKeyFile = "client-key" + FlagCAFile = "certificate-authority" + FlagEmbedCerts = "embed-certs" + FlagBearerToken = "token" + FlagImpersonate = "as" + FlagImpersonateUID = "as-uid" + FlagImpersonateGroup = "as-group" + FlagUsername = "username" + FlagPassword = "password" + FlagTimeout = "request-timeout" + FlagProxyURL = "proxy-url" + FlagDisableCompression = "disable-compression" ) // RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing @@ -195,6 +199,8 @@ func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"}, InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"}, TLSServerName: FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used."}, + ProxyURL: FlagInfo{prefix + FlagProxyURL, "", "", "If provided, this URL will be used to connect via proxy"}, + DisableCompression: FlagInfo{prefix + FlagDisableCompression, "", "", "If true, opt-out of response compression for all requests to the server"}, } } @@ -234,6 +240,8 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, f flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority) flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify) flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName) + flagNames.ProxyURL.BindStringFlag(flags, &clusterInfo.ProxyURL) + flagNames.DisableCompression.BindBoolFlag(flags, &clusterInfo.DisableCompression) } // BindFlags is a convenience method to bind the specified flags to their associated variables diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go index 2ae1eb706..088972ef6 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -204,8 +204,19 @@ func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { if exists { validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) 
- validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) - validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) + + // Default to empty users and clusters and let the validation function report an error. + authInfo := config.AuthInfos[context.AuthInfo] + if authInfo == nil { + authInfo = &clientcmdapi.AuthInfo{} + } + validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...) + + cluster := config.Clusters[context.Cluster] + if cluster == nil { + cluster = &clientcmdapi.Cluster{} + } + validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...) } return newErrConfigurationInvalid(validationErrors) diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS index 77bcb5090..2c9488a5f 100644 --- a/vendor/k8s.io/client-go/tools/metrics/OWNERS +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -1,6 +1,5 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- wojtek-t -- krousey -- jayunit100 + - wojtek-t + - jayunit100 diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go index 597dc8e53..99d3d8e23 100644 --- a/vendor/k8s.io/client-go/tools/metrics/metrics.go +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -42,6 +42,15 @@ type LatencyMetric interface { Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) } +type ResolverLatencyMetric interface { + Observe(ctx context.Context, host string, latency time.Duration) +} + +// SizeMetric observes client response size partitioned by verb and host. +type SizeMetric interface { + Observe(ctx context.Context, verb string, host string, size float64) +} + // ResultMetric counts response codes partitioned by method and host. type ResultMetric interface { Increment(ctx context.Context, code string, method string, host string) @@ -53,6 +62,23 @@ type CallsMetric interface { Increment(exitCode int, callStatus string) } +// RetryMetric counts the number of retries sent to the server +// partitioned by code, method, and host. +type RetryMetric interface { + IncrementRetry(ctx context.Context, code string, method string, host string) +} + +// TransportCacheMetric shows the number of entries in the internal transport cache +type TransportCacheMetric interface { + Observe(value int) +} + +// TransportCreateCallsMetric counts the number of times a transport is created +// partitioned by the result of the cache: hit, miss, uncacheable +type TransportCreateCallsMetric interface { + Increment(result string) +} + var ( // ClientCertExpiry is the expiry time of a client certificate ClientCertExpiry ExpiryMetric = noopExpiry{} @@ -60,6 +86,12 @@ var ( ClientCertRotationAge DurationMetric = noopDuration{} // RequestLatency is the latency metric that rest clients will update. RequestLatency LatencyMetric = noopLatency{} + // ResolverLatency is the latency metric that DNS resolver will update + ResolverLatency ResolverLatencyMetric = noopResolverLatency{} + // RequestSize is the request size metric that rest clients will update. + RequestSize SizeMetric = noopSize{} + // ResponseSize is the response size metric that rest clients will update. + ResponseSize SizeMetric = noopSize{} // RateLimiterLatency is the client side rate limiter latency metric. 
RateLimiterLatency LatencyMetric = noopLatency{} // RequestResult is the result metric that rest clients will update. @@ -67,6 +99,15 @@ var ( // ExecPluginCalls is the number of calls made to an exec plugin, partitioned by // exit code and call status. ExecPluginCalls CallsMetric = noopCalls{} + // RequestRetry is the retry metric that tracks the number of + // retries sent to the server. + RequestRetry RetryMetric = noopRetry{} + // TransportCacheEntries is the metric that tracks the number of entries in the + // internal transport cache. + TransportCacheEntries TransportCacheMetric = noopTransportCache{} + // TransportCreateCalls is the metric that counts the number of times a new transport + // is created + TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{} ) // RegisterOpts contains all the metrics to register. Metrics may be nil. @@ -74,9 +115,15 @@ type RegisterOpts struct { ClientCertExpiry ExpiryMetric ClientCertRotationAge DurationMetric RequestLatency LatencyMetric + ResolverLatency ResolverLatencyMetric + RequestSize SizeMetric + ResponseSize SizeMetric RateLimiterLatency LatencyMetric RequestResult ResultMetric ExecPluginCalls CallsMetric + RequestRetry RetryMetric + TransportCacheEntries TransportCacheMetric + TransportCreateCalls TransportCreateCallsMetric } // Register registers metrics for the rest client to use. This can @@ -92,6 +139,15 @@ func Register(opts RegisterOpts) { if opts.RequestLatency != nil { RequestLatency = opts.RequestLatency } + if opts.ResolverLatency != nil { + ResolverLatency = opts.ResolverLatency + } + if opts.RequestSize != nil { + RequestSize = opts.RequestSize + } + if opts.ResponseSize != nil { + ResponseSize = opts.ResponseSize + } if opts.RateLimiterLatency != nil { RateLimiterLatency = opts.RateLimiterLatency } @@ -101,6 +157,15 @@ func Register(opts RegisterOpts) { if opts.ExecPluginCalls != nil { ExecPluginCalls = opts.ExecPluginCalls } + if opts.RequestRetry != nil { + RequestRetry = opts.RequestRetry + } + if opts.TransportCacheEntries != nil { + TransportCacheEntries = opts.TransportCacheEntries + } + if opts.TransportCreateCalls != nil { + TransportCreateCalls = opts.TransportCreateCalls + } }) } @@ -116,6 +181,15 @@ type noopLatency struct{} func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {} +type noopResolverLatency struct{} + +func (n noopResolverLatency) Observe(ctx context.Context, host string, latency time.Duration) { +} + +type noopSize struct{} + +func (noopSize) Observe(context.Context, string, string, float64) {} + type noopResult struct{} func (noopResult) Increment(context.Context, string, string, string) {} @@ -123,3 +197,15 @@ func (noopResult) Increment(context.Context, string, string, string) {} type noopCalls struct{} func (noopCalls) Increment(int, string) {} + +type noopRetry struct{} + +func (noopRetry) IncrementRetry(context.Context, string, string, string) {} + +type noopTransportCache struct{} + +func (noopTransportCache) Observe(int) {} + +type noopTransportCreateCalls struct{} + +func (noopTransportCreateCalls) Increment(string) {} diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go index 805859e09..3c77cc37f 100644 --- a/vendor/k8s.io/client-go/tools/pager/pager.go +++ b/vendor/k8s.io/client-go/tools/pager/pager.go @@ -73,7 +73,23 @@ func New(fn ListPageFunc) *ListPager { // List returns a single list object, but attempts to retrieve smaller chunks from the // server to reduce the impact on the server. 
If the chunk attempt fails, it will load // the full list instead. The Limit field on options, if unset, will default to the page size. +// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use ListWithAlloc instead. func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, false) +} + +// ListWithAlloc works like List, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items in the returned list are not retained, or are retained for the same duration, use List instead for memory efficiency. +func (p *ListPager) ListWithAlloc(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) { + return p.list(ctx, options, true) +} + +func (p *ListPager) list(ctx context.Context, options metav1.ListOptions, allocNew bool) (runtime.Object, bool, error) { if options.Limit == 0 { options.Limit = p.PageSize } @@ -123,7 +139,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti list.ResourceVersion = m.GetResourceVersion() list.SelfLink = m.GetSelfLink() } - if err := meta.EachListItem(obj, func(obj runtime.Object) error { + eachListItemFunc := meta.EachListItem + if allocNew { + eachListItemFunc = meta.EachListItemWithAlloc + } + if err := eachListItemFunc(obj, func(obj runtime.Object) error { list.Items = append(list.Items, obj) return nil }); err != nil { @@ -156,12 +176,26 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti // // Items are retrieved in chunks from the server to reduce the impact on the server with up to // ListPager.PageBufferSize chunks buffered concurrently in the background. +// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining the whole slice returned by p.PageFn as long as any item is referenced, +// use EachListItemWithAlloc instead. func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { return meta.EachListItem(obj, fn) }) } +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice returned by p.PageFn. +// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func (p *ListPager) EachListItemWithAlloc(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItemWithAlloc(obj, fn) + }) +} + // eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on // each list chunk. If fn returns an error, processing stops and that error is returned. 
If fn does // not return an error, any error encountered while retrieving the list from the server is @@ -203,6 +237,11 @@ func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.Li }() for o := range chunkC { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } err := fn(o) if err != nil { return err // any fn error should be returned immediately diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go index 442a991cc..5d4ec3743 100644 --- a/vendor/k8s.io/client-go/tools/reference/ref.go +++ b/vendor/k8s.io/client-go/tools/reference/ref.go @@ -34,7 +34,7 @@ var ( // GetReference returns an ObjectReference which refers to the given // object, or an error if the object doesn't follow the conventions // that would allow this. -// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +// TODO: should take a meta.Interface see https://issue.k8s.io/7127 func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) { if obj == nil { return nil, ErrNilObject diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS index a52176903..34adee5ec 100644 --- a/vendor/k8s.io/client-go/transport/OWNERS +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -1,9 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: -- smarterclayton -- wojtek-t -- deads2k -- liggitt -- krousey -- caesarxuchao + - smarterclayton + - wojtek-t + - deads2k + - liggitt + - caesarxuchao diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index f4a864d05..7c7f1b330 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -27,6 +27,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/metrics" ) // TlsTransportCache caches TLS http.RoundTrippers different configurations. The @@ -37,6 +38,11 @@ type tlsTransportCache struct { transports map[tlsCacheKey]*http.Transport } +// DialerStopCh is stop channel that is passed down to dynamic cert dialer. +// It's exposed as variable for testing purposes to avoid testing for goroutine +// leakages. 
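Editor's note (not part of the patch): the pager.go changes above add `ListWithAlloc`/`EachListItemWithAlloc` and an early exit on context cancellation. The following is an illustrative sketch of paging Pods with the new allocating variant; it assumes an in-cluster configuration only to keep the example self-contained.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the example runs inside a pod
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return clientset.CoreV1().Pods(metav1.NamespaceAll).List(ctx, opts)
	})

	// EachListItemWithAlloc shallow-copies items out of each page, so keeping a
	// reference to a single Pod does not pin the whole page in memory.
	err = p.EachListItemWithAlloc(context.Background(), metav1.ListOptions{Limit: 500},
		func(obj runtime.Object) error {
			pod := obj.(*corev1.Pod)
			fmt.Println(pod.Namespace, pod.Name)
			return nil
		})
	if err != nil {
		panic(err)
	}
}
```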
+var DialerStopCh = wait.NeverStop + const idleConnsPerHost = 25 var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)} @@ -75,11 +81,16 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { // Ensure we only create a single transport for the given TLS options c.mu.Lock() defer c.mu.Unlock() + defer metrics.TransportCacheEntries.Observe(len(c.transports)) // See if we already have a custom transport for this config if t, ok := c.transports[key]; ok { + metrics.TransportCreateCalls.Increment("hit") return t, nil } + metrics.TransportCreateCalls.Increment("miss") + } else { + metrics.TransportCreateCalls.Increment("uncacheable") } // Get the TLS options for this client config @@ -88,13 +99,13 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { return nil, err } // The options didn't require a custom TLS config - if tlsConfig == nil && config.Dial == nil && config.Proxy == nil { + if tlsConfig == nil && config.DialHolder == nil && config.Proxy == nil { return http.DefaultTransport, nil } var dial func(ctx context.Context, network, address string) (net.Conn, error) - if config.Dial != nil { - dial = config.Dial + if config.DialHolder != nil { + dial = config.DialHolder.Dial } else { dial = (&net.Dialer{ Timeout: 30 * time.Second, @@ -104,11 +115,11 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { // If we use are reloading files, we need to handle certificate rotation properly // TODO(jackkleeman): We can also add rotation here when config.HasCertCallback() is true - if config.TLS.ReloadTLSFiles { + if config.TLS.ReloadTLSFiles && tlsConfig != nil && tlsConfig.GetClientCertificate != nil { dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial) tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate dial = dynamicCertDialer.connDialer.DialContext - go dynamicCertDialer.Run(wait.NeverStop) + go dynamicCertDialer.Run(DialerStopCh) } proxy := http.ProxyFromEnvironment @@ -144,14 +155,6 @@ func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) { // cannot determine equality for functions return tlsCacheKey{}, false, nil } - if c.Dial != nil && c.DialHolder == nil { - // cannot determine equality for dial function that doesn't have non-nil DialHolder set as well - return tlsCacheKey{}, false, nil - } - if c.TLS.GetCert != nil && c.TLS.GetCertHolder == nil { - // cannot determine equality for getCert function that doesn't have non-nil GetCertHolder set as well - return tlsCacheKey{}, false, nil - } k := tlsCacheKey{ insecure: c.TLS.Insecure, diff --git a/vendor/k8s.io/client-go/transport/cache_go118.go b/vendor/k8s.io/client-go/transport/cache_go118.go new file mode 100644 index 000000000..d21d5137d --- /dev/null +++ b/vendor/k8s.io/client-go/transport/cache_go118.go @@ -0,0 +1,24 @@ +//go:build go1.18 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package transport + +// assert at compile time that tlsCacheKey is comparable in a way that will never panic at runtime. +var _ = isComparable[tlsCacheKey] + +func isComparable[T comparable]() {} diff --git a/vendor/k8s.io/client-go/transport/cert_rotation.go b/vendor/k8s.io/client-go/transport/cert_rotation.go index dc22b6ec4..e76f65812 100644 --- a/vendor/k8s.io/client-go/transport/cert_rotation.go +++ b/vendor/k8s.io/client-go/transport/cert_rotation.go @@ -47,14 +47,17 @@ type dynamicClientCert struct { connDialer *connrotation.Dialer // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert { d := &dynamicClientCert{ reload: reload, connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"}, + ), } return d diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index fd853c0b3..d8a3d64b3 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -67,11 +67,8 @@ type Config struct { // instead of setting this value directly. WrapTransport WrapperFunc - // Dial specifies the dial function for creating unencrypted TCP connections. - // If specified, this transport will be non-cacheable unless DialHolder is also set. - Dial func(ctx context.Context, network, address string) (net.Conn, error) - // DialHolder can be populated to make transport configs cacheable. - // If specified, DialHolder.Dial must be equal to Dial. + // DialHolder specifies the dial function for creating unencrypted TCP connections. + // This struct indirection is used to make transport configs cacheable. DialHolder *DialHolder // Proxy is the proxy func to be used for all requests made by this @@ -121,7 +118,7 @@ func (c *Config) HasCertAuth() bool { // HasCertCallback returns whether the configuration has certificate callback or not. func (c *Config) HasCertCallback() bool { - return c.TLS.GetCert != nil + return c.TLS.GetCertHolder != nil } // Wrap adds a transport middleware function that will give the caller @@ -153,10 +150,7 @@ type TLSConfig struct { NextProtos []string // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. - // If specified, this transport is non-cacheable unless CertHolder is populated. - GetCert func() (*tls.Certificate, error) - // CertHolder can be populated to make transport configs that set GetCert cacheable. - // If set, CertHolder.GetCert must be equal to GetCert. + // This struct indirection is used to make transport configs cacheable. 
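Editor's note (not part of the patch): with the `Dial` and `GetCert` fields removed from `transport.Config` above, callers now supply callbacks through `DialHolder` and `GetCertHolder`. A minimal sketch, with a placeholder certificate callback, might look like this:

```go
package main

import (
	"crypto/tls"
	"log"
	"net"

	"k8s.io/client-go/transport"
)

func main() {
	// Supplying the callbacks via the holder structs keeps the resulting
	// transport cacheable by the transport cache.
	dialer := &net.Dialer{}
	cfg := &transport.Config{
		DialHolder: &transport.DialHolder{Dial: dialer.DialContext},
		TLS: transport.TLSConfig{
			GetCertHolder: &transport.GetCertHolder{
				GetCert: func() (*tls.Certificate, error) {
					// placeholder: a real callback would return a client certificate
					return &tls.Certificate{}, nil
				},
			},
		},
	}
	rt, err := transport.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = rt // use rt as the base round tripper for an http.Client
}
```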
GetCertHolder *GetCertHolder } diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 4c74606a9..e2d1dcc9a 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -72,7 +72,7 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { case bool(klog.V(9).Enabled()): - rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugDetailedTiming, DebugResponseHeaders) + rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugURLTiming, DebugDetailedTiming, DebugResponseHeaders) case bool(klog.V(8).Enabled()): rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus, DebugResponseHeaders) case bool(klog.V(7).Enabled()): @@ -491,7 +491,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e DNSDone: func(info httptrace.DNSDoneInfo) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.DNSLookup = time.Now().Sub(dnsStart) + reqInfo.DNSLookup = time.Since(dnsStart) klog.Infof("HTTP Trace: DNS Lookup for %s resolved to %v", host, info.Addrs) }, // Dial @@ -503,7 +503,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e ConnectDone: func(network, addr string, err error) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.Dialing = time.Now().Sub(dialStart) + reqInfo.Dialing = time.Since(dialStart) if err != nil { klog.Infof("HTTP Trace: Dial to %s:%s failed: %v", network, addr, err) } else { @@ -517,7 +517,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e TLSHandshakeDone: func(_ tls.ConnectionState, _ error) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.TLSHandshake = time.Now().Sub(tlsStart) + reqInfo.TLSHandshake = time.Since(tlsStart) }, // Connection (it can be DNS + Dial or just the time to get one from the connection pool) GetConn: func(hostPort string) { @@ -526,7 +526,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e GotConn: func(info httptrace.GotConnInfo) { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.GetConnection = time.Now().Sub(getConn) + reqInfo.GetConnection = time.Since(getConn) reqInfo.ConnectionReused = info.Reused }, // Server Processing (time since we wrote the request until first byte is received) @@ -538,7 +538,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e GotFirstResponseByte: func() { reqInfo.muTrace.Lock() defer reqInfo.muTrace.Unlock() - reqInfo.ServerProcessing = time.Now().Sub(serverStart) + reqInfo.ServerProcessing = time.Since(serverStart) }, } req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace)) diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index 68a0a704f..8e312800f 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -18,8 +18,8 @@ package transport import ( "fmt" - "io/ioutil" "net/http" + "os" "strings" "sync" "time" @@ -132,7 +132,7 @@ type fileTokenSource struct { var _ = oauth2.TokenSource(&fileTokenSource{}) func (ts *fileTokenSource) Token() (*oauth2.Token, error) { - tokb, err := ioutil.ReadFile(ts.path) + tokb, err := os.ReadFile(ts.path) if err != nil { return nil, fmt.Errorf("failed to read token file %q: 
%v", ts.path, err) } diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index eabfce72d..4770331a0 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -22,9 +22,8 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io/ioutil" "net/http" - "reflect" + "os" "sync" "time" @@ -62,20 +61,12 @@ func New(config *Config) (http.RoundTripper, error) { } func isValidHolders(config *Config) bool { - if config.TLS.GetCertHolder != nil { - if config.TLS.GetCertHolder.GetCert == nil || - config.TLS.GetCert == nil || - reflect.ValueOf(config.TLS.GetCertHolder.GetCert).Pointer() != reflect.ValueOf(config.TLS.GetCert).Pointer() { - return false - } + if config.TLS.GetCertHolder != nil && config.TLS.GetCertHolder.GetCert == nil { + return false } - if config.DialHolder != nil { - if config.DialHolder.Dial == nil || - config.Dial == nil || - reflect.ValueOf(config.DialHolder.Dial).Pointer() != reflect.ValueOf(config.Dial).Pointer() { - return false - } + if config.DialHolder != nil && config.DialHolder.Dial == nil { + return false } return true @@ -105,6 +96,32 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCA() { + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + >2. client verifies the apiserver certificate sent against its cluster certificate authority data + 3. client sending its client certificate along with its public key to the apiserver + 4. apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this block, + cluster certificate authority data gets loaded into TLS before the handshake process + for client to later during the handshake verify the apiserver certificate + + normal args related to this stage: + --certificate-authority='': + Path to a cert file for the certificate authority + + (retrievable from "kubectl options" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 3, see: a few lines below in this file + - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go + */ + rootCAs, err := rootCertPool(c.TLS.CAData) if err != nil { return nil, fmt.Errorf("unable to load root certificates: %w", err) @@ -130,6 +147,35 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCertAuth() || c.HasCertCallback() { + + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + 2. client verifies the apiserver certificate sent against its cluster certificate authority data + >3. client sending its client certificate along with its public key to the apiserver + 4. 
apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this callback function, + client certificate and pub key get loaded into TLS during the handshake process + for apiserver to later in the step 4 verify the client certificate + + normal args related to this stage: + --client-certificate='': + Path to a client certificate file for TLS + --client-key='': + Path to a client key file for TLS + + (retrievable from "kubectl options" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 2, see: a few lines above in this file + - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go + */ + tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { // Note: static key/cert data always take precedence over cert // callback. @@ -141,7 +187,7 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { return dynamicCertLoader() } if c.HasCertCallback() { - cert, err := c.TLS.GetCert() + cert, err := c.TLS.GetCertHolder.GetCert() if err != nil { return nil, err } @@ -182,10 +228,7 @@ func loadTLSFiles(c *Config) error { } c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) - if err != nil { - return err - } - return nil + return err } // dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, @@ -195,7 +238,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { return data, nil } if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) + fileData, err := os.ReadFile(file) if err != nil { return []byte{}, err } diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS index 3cf036438..3c3b94c58 100644 --- a/vendor/k8s.io/client-go/util/cert/OWNERS +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -1,9 +1,8 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: -- sig-auth-certificates-approvers + - sig-auth-certificates-approvers reviewers: -- sig-auth-certificates-reviewers + - sig-auth-certificates-reviewers labels: -- sig/auth - + - sig/auth diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 75143ec07..91e171271 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -25,9 +25,10 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" + "math" "math/big" "net" + "os" "path/filepath" "strings" "time" @@ -44,6 +45,7 @@ type Config struct { Organization []string AltNames AltNames Usages []x509.ExtKeyUsage + NotBefore time.Time } // AltNames contains the domain names and IP addresses that will be added @@ -57,14 +59,24 @@ type AltNames struct { // NewSelfSignedCACert creates a CA certificate func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
+ serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) + notBefore := now.UTC() + if !cfg.NotBefore.IsZero() { + notBefore = cfg.NotBefore.UTC() + } tmpl := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(0), + SerialNumber: serial, Subject: pkix.Name{ CommonName: cfg.CommonName, Organization: cfg.Organization, }, DNSNames: []string{cfg.CommonName}, - NotBefore: now.UTC(), + NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, @@ -101,9 +113,9 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt") keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key") if len(fixtureDirectory) > 0 { - cert, err := ioutil.ReadFile(certFixturePath) + cert, err := os.ReadFile(certFixturePath) if err == nil { - key, err := ioutil.ReadFile(keyFixturePath) + key, err := os.ReadFile(keyFixturePath) if err == nil { return cert, key, nil } @@ -116,9 +128,14 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err != nil { return nil, nil, err } - + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). + serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) caTemplate := x509.Certificate{ - SerialNumber: big.NewInt(1), + SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()), }, @@ -144,9 +161,14 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a if err != nil { return nil, nil, err } - + // returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max). 
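Editor's note (not part of the patch): the cert.go changes above add a `NotBefore` field to `cert.Config` and replace fixed serial numbers with random ones. An illustrative sketch of generating a self-signed CA with the new field (the names and the backdated `NotBefore` are arbitrary):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"time"

	"k8s.io/client-go/util/cert"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// NotBefore is new in this version; left as the zero value, the current
	// time is used as before.
	caCert, err := cert.NewSelfSignedCACert(cert.Config{
		CommonName:   "example-ca",
		Organization: []string{"example"},
		NotBefore:    time.Now().Add(-time.Hour),
	}, key)
	if err != nil {
		panic(err)
	}
	// The serial number is now a random positive value rather than a constant.
	fmt.Println(caCert.SerialNumber)
}
```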
+ serial, err = cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, nil, err + } + serial = new(big.Int).Add(serial, big.NewInt(1)) template := x509.Certificate{ - SerialNumber: big.NewInt(2), + SerialNumber: serial, Subject: pkix.Name{ CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), }, @@ -188,10 +210,10 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a } if len(fixtureDirectory) > 0 { - if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { + if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err) } - if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil { + if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil { return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err) } } diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go index 35fde68a4..a70e51327 100644 --- a/vendor/k8s.io/client-go/util/cert/io.go +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -19,7 +19,6 @@ package cert import ( "crypto/x509" "fmt" - "io/ioutil" "os" "path/filepath" ) @@ -66,13 +65,13 @@ func WriteCert(certPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { return err } - return ioutil.WriteFile(certPath, data, os.FileMode(0644)) + return os.WriteFile(certPath, data, os.FileMode(0644)) } // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { - pemBlock, err := ioutil.ReadFile(filename) + pemBlock, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -101,7 +100,7 @@ func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) { // CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func CertsFromFile(file string) ([]*x509.Certificate, error) { - pemBlock, err := ioutil.ReadFile(file) + pemBlock, err := os.ReadFile(file) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go new file mode 100644 index 000000000..b33d08032 --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go @@ -0,0 +1,146 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consistencydetector + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +type RetrieveItemsFunc[U any] func() []U + +type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) + +// CheckDataConsistency exists solely for testing purposes. +// we cannot use checkWatchListDataConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. +func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { + if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) { + klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity) + return + } + klog.Warningf("data consistency check for %s is enabled, this will result in an additional call to the API server.", identity) + + retrievedItems := toMetaObjectSliceOrDie(retrieveItemsFn()) + listOptions = prepareListCallOptions(lastSyncedResourceVersion, listOptions, len(retrievedItems)) + var list runtime.Object + err := wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (done bool, err error) { + list, err = listFn(ctx, listOptions) + if err != nil { + // the consistency check will only be enabled in the CI + // and LIST calls in general will be retired by the client-go library + // if we fail simply log and retry + klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + klog.Errorf("failed to list data from the server, the data consistency check for %s won't be performed, stopCh was closed, err: %v", identity, err) + return + } + + rawListItems, err := meta.ExtractListWithAlloc(list) + if err != nil { + panic(err) // this should never happen + } + listItems := toMetaObjectSliceOrDie(rawListItems) + + sort.Sort(byUID(listItems)) + sort.Sort(byUID(retrievedItems)) + + if !cmp.Equal(listItems, retrievedItems) { + klog.Infof("previously received data for %s is different than received by the standard list api call against etcd, diff: %v", identity, cmp.Diff(listItems, retrievedItems)) + msg := fmt.Sprintf("data inconsistency detected for %s, panicking!", identity) + panic(msg) + } +} + +// canFormAdditionalListCall ensures that we can form a valid LIST requests +// for checking data consistency. 
+func canFormAdditionalListCall(lastSyncedResourceVersion string, listOptions metav1.ListOptions) bool { + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the continuation hasn't been set + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L38 + if len(listOptions.Continue) > 0 { + return false + } + + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the RV is valid because the validation code forbids RV == "0" + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L44 + if lastSyncedResourceVersion == "0" { + return false + } + + return true +} + +// prepareListCallOptions changes the input list options so that +// the list call goes directly to etcd +func prepareListCallOptions(lastSyncedResourceVersion string, listOptions metav1.ListOptions, retrievedItemsCount int) metav1.ListOptions { + // this is our legacy case: + // + // the watch cache skips the Limit if the ResourceVersion was set to "0" + // thus, to compare with data retrieved directly from etcd + // we need to skip the limit to for the list call as well. + // + // note that when the number of retrieved items is less than the request limit, + // it means either the watch cache is disabled, or there is not enough data. + // in both cases, we can use the limit because we will be able to compare + // the data with the items retrieved from etcd. + if listOptions.ResourceVersion == "0" && listOptions.Limit > 0 && int64(retrievedItemsCount) > listOptions.Limit { + listOptions.Limit = 0 + } + + // set the RV and RVM so that we get the snapshot of data + // directly from etcd. + listOptions.ResourceVersion = lastSyncedResourceVersion + listOptions.ResourceVersionMatch = metav1.ResourceVersionMatchExact + + return listOptions +} + +type byUID []metav1.Object + +func (a byUID) Len() int { return len(a) } +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } +func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { + result := make([]metav1.Object, len(s)) + for i, v := range s { + m, err := meta.Accessor(v) + if err != nil { + panic(err) + } + result[i] = m + } + return result +} diff --git a/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go new file mode 100644 index 000000000..7610c05c2 --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go @@ -0,0 +1,70 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForListFromCacheEnabled = false + +func init() { + dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR")) +} + +// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup +// for requests that have a high chance of being served from the watch-cache. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by a list api call from the watch-cache +// is exactly the same as data received by the list api call from etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +// +// Note that this function doesn't examine the ListOptions to determine +// if the original request has hit the cache because it would be challenging +// to maintain consistency with the server-side implementation. +// For simplicity, we assume that the first request retrieved data from +// the cache (even though this might not be true for some requests) +// and issue the second call to get data from etcd for comparison. +func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !dataConsistencyDetectionForListFromCacheEnabled { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} + +func checkListFromCacheDataConsistencyIfRequestedInternal[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + receivedListMeta, err := meta.ListAccessor(receivedList) + if err != nil { + panic(err) + } + rawListItems, err := meta.ExtractListWithAlloc(receivedList) + if err != nil { + panic(err) // this should never happen + } + lastSyncedResourceVersion := receivedListMeta.GetResourceVersion() + CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listItemsFn, optionsUsedToReceiveList, func() []runtime.Object { return rawListItems }) +} diff --git a/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go new file mode 100644 index 000000000..cda5fc205 --- /dev/null +++ b/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForWatchListEnabled = false + +func init() { + dataConsistencyDetectionForWatchListEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// IsDataConsistencyDetectionForWatchListEnabled returns true when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +func IsDataConsistencyDetectionForWatchListEnabled() bool { + return dataConsistencyDetectionForWatchListEnabled +} + +// CheckWatchListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call against etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func CheckWatchListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !IsDataConsistencyDetectionForWatchListEnabled() { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 3ef88dbdb..82e4c4c40 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -23,7 +23,6 @@ import ( "k8s.io/utils/clock" testingclock "k8s.io/utils/clock/testing" - "k8s.io/utils/integer" ) type backoffEntry struct { @@ -100,7 +99,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) { } else { delay := entry.backoff * 2 // exponential delay += p.jitter(entry.backoff) // add some jitter to the delay - entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + entry.backoff = min(delay, p.maxDuration) } entry.lastUpdate = p.Clock.Now() } diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS index 470b7a1c9..e6d229d5d 100644 --- a/vendor/k8s.io/client-go/util/keyutil/OWNERS +++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS @@ -1,7 +1,6 @@ approvers: -- sig-auth-certificates-approvers + - sig-auth-certificates-approvers reviewers: -- sig-auth-certificates-reviewers + - sig-auth-certificates-reviewers labels: -- sig/auth - + - sig/auth diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go index 83c2c6254..ecd3e4710 100644 --- a/vendor/k8s.io/client-go/util/keyutil/key.go +++ b/vendor/k8s.io/client-go/util/keyutil/key.go @@ -26,7 +26,6 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "io/ioutil" "os" "path/filepath" ) @@ -69,13 +68,13 @@ func WriteKey(keyPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { return err } - return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) + return os.WriteFile(keyPath, data, os.FileMode(0600)) } // LoadOrGenerateKeyFile looks for a key in the file at the given 
path. If it // can't find one, it will generate a new key and store it there. func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { - loadedData, err := ioutil.ReadFile(keyPath) + loadedData, err := os.ReadFile(keyPath) // Call verifyKeyData to ensure the file wasn't empty/corrupt. if err == nil && verifyKeyData(loadedData) { return loadedData, false, err @@ -122,7 +121,7 @@ func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { // PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. // Returns an error if the file could not be read or if the private key could not be parsed. func PrivateKeyFromFile(file string) (interface{}, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return nil, err } @@ -136,7 +135,7 @@ func PrivateKeyFromFile(file string) (interface{}, error) { // PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. // Reads public keys from both public and private key files. func PublicKeysFromFile(file string) ([]interface{}, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/util/watchlist/watch_list.go b/vendor/k8s.io/client-go/util/watchlist/watch_list.go new file mode 100644 index 000000000..84106458a --- /dev/null +++ b/vendor/k8s.io/client-go/util/watchlist/watch_list.go @@ -0,0 +1,82 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watchlist + +import ( + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientfeatures "k8s.io/client-go/features" + "k8s.io/utils/ptr" +) + +var scheme = runtime.NewScheme() + +func init() { + utilruntime.Must(metainternalversion.AddToScheme(scheme)) +} + +// PrepareWatchListOptionsFromListOptions creates a new ListOptions +// that can be used for a watch-list request from the given listOptions. +// +// This function also determines if the given listOptions can be used to form a watch-list request, +// which would result in streaming semantically equivalent data from the server. 
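Editor's note (not part of the patch): a hedged sketch of calling the `PrepareWatchListOptionsFromListOptions` helper documented above and defined just below. The input options are arbitrary, and in practice the second return value is false unless the `WatchListClient` feature gate is enabled.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/watchlist"
)

func main() {
	// Given the options of an ordinary LIST call, ask whether a semantically
	// equivalent watch-list request can be formed. ok is false when the feature
	// gate is off or the options cannot be translated (e.g. ResourceVersionMatchExact).
	listOpts := metav1.ListOptions{ResourceVersion: "0", Limit: 500}
	watchOpts, ok, err := watchlist.PrepareWatchListOptionsFromListOptions(listOpts)
	if err != nil {
		panic(err)
	}
	if !ok {
		fmt.Println("falling back to a regular LIST")
		return
	}
	fmt.Printf("streaming list with options: %+v\n", watchOpts)
}
```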
+func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return metav1.ListOptions{}, false, nil + } + + internalListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + watchListOptions := listOptions + // this is our legacy case, the cache ignores LIMIT for + // ResourceVersion == 0 and RVM=unset|NotOlderThan + if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" { + return metav1.ListOptions{}, false, nil + } + watchListOptions.Limit = 0 + + // to ensure that we can create a watch-list request that returns + // semantically equivalent data for the given listOptions, + // we need to validate that the RVM for the list is supported by watch-list requests. + if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + return metav1.ListOptions{}, false, nil + } + watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + + watchListOptions.Watch = true + watchListOptions.AllowWatchBookmarks = true + watchListOptions.SendInitialEvents = ptr.To(true) + + internalWatchListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + return watchListOptions, true, nil +} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index efda7c197..1f9567881 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -24,49 +24,66 @@ import ( "golang.org/x/time/rate" ) -type RateLimiter interface { +// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead. +type RateLimiter TypedRateLimiter[any] + +type TypedRateLimiter[T comparable] interface { // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration + When(item T) time.Duration // Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing // or for success, we'll stop tracking it - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int + NumRequeues(item T) int } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has // both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +// +// Deprecated: Use DefaultTypedControllerRateLimiter instead. func DefaultControllerRateLimiter() RateLimiter { - return NewMaxOfRateLimiter( - NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + return DefaultTypedControllerRateLimiter[any]() +} + +// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limiting. 
The overall is a token bucket and the per-item is exponential +func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedMaxOfRateLimiter( + NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second), // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API -type BucketRateLimiter struct { +// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead. +type BucketRateLimiter = TypedBucketRateLimiter[any] + +// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type TypedBucketRateLimiter[T comparable] struct { *rate.Limiter } var _ RateLimiter = &BucketRateLimiter{} -func (r *BucketRateLimiter) When(item interface{}) time.Duration { +func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration { return r.Limiter.Reserve().Delay() } -func (r *BucketRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int { return 0 } -func (r *BucketRateLimiter) Forget(item interface{}) { +func (r *TypedBucketRateLimiter[T]) Forget(item T) { } -// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit +// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead. +type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any] + +// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit // dealing with max failures and expiration are up to the caller -type ItemExponentialFailureRateLimiter struct { +type TypedItemExponentialFailureRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int baseDelay time.Duration maxDelay time.Duration @@ -74,19 +91,29 @@ type ItemExponentialFailureRateLimiter struct { var _ RateLimiter = &ItemExponentialFailureRateLimiter{} +// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead. func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { - return &ItemExponentialFailureRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay) +} + +func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedItemExponentialFailureRateLimiter[T]{ + failures: map[T]int{}, baseDelay: baseDelay, maxDelay: maxDelay, } } +// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead. 
func DefaultItemBasedRateLimiter() RateLimiter { - return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) + return DefaultTypedItemBasedRateLimiter[any]() } -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { +func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second) +} + +func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -107,14 +134,14 @@ func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration return calculated } -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { +func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -122,9 +149,13 @@ func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { } // ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that -type ItemFastSlowRateLimiter struct { +// Deprecated: Use TypedItemFastSlowRateLimiter instead. +type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any] + +// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type TypedItemFastSlowRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int maxFastAttempts int fastDelay time.Duration @@ -133,16 +164,21 @@ type ItemFastSlowRateLimiter struct { var _ RateLimiter = &ItemFastSlowRateLimiter{} +// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead. 
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { - return &ItemFastSlowRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts) +} + +func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] { + return &TypedItemFastSlowRateLimiter[T]{ + failures: map[T]int{}, fastDelay: fastDelay, slowDelay: slowDelay, maxFastAttempts: maxFastAttempts, } } -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { +func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -155,14 +191,14 @@ func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { return r.slowDelay } -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { +func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -172,11 +208,18 @@ func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { // MaxOfRateLimiter calls every RateLimiter and returns the worst case response // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items // were separately delayed a longer time. -type MaxOfRateLimiter struct { - limiters []RateLimiter +// +// Deprecated: Use TypedMaxOfRateLimiter instead. +type MaxOfRateLimiter = TypedMaxOfRateLimiter[any] + +// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items +// were separately delayed a longer time. +type TypedMaxOfRateLimiter[T comparable] struct { + limiters []TypedRateLimiter[T] } -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { +func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration { ret := time.Duration(0) for _, limiter := range r.limiters { curr := limiter.When(item) @@ -188,11 +231,16 @@ func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { return ret } -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { - return &MaxOfRateLimiter{limiters: limiters} +// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead. +func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter { + return NewTypedMaxOfRateLimiter(limiters...) } -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { +func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] { + return &TypedMaxOfRateLimiter[T]{limiters: limiters} +} + +func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int { ret := 0 for _, limiter := range r.limiters { curr := limiter.NumRequeues(item) @@ -204,23 +252,32 @@ func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { return ret } -func (r *MaxOfRateLimiter) Forget(item interface{}) { +func (r *TypedMaxOfRateLimiter[T]) Forget(item T) { for _, limiter := range r.limiters { limiter.Forget(item) } } // WithMaxWaitRateLimiter have maxDelay which avoids waiting too long -type WithMaxWaitRateLimiter struct { - limiter RateLimiter +// Deprecated: Use TypedWithMaxWaitRateLimiter instead. 
+type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any] + +// TypedWithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type TypedWithMaxWaitRateLimiter[T comparable] struct { + limiter TypedRateLimiter[T] maxDelay time.Duration } +// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead. func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { - return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} + return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay) +} + +func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay} } -func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { +func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration { delay := w.limiter.When(item) if delay > w.maxDelay { return w.maxDelay @@ -229,10 +286,10 @@ func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { return delay } -func (w WithMaxWaitRateLimiter) Forget(item interface{}) { +func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) { w.limiter.Forget(item) } -func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { +func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int { return w.limiter.NumRequeues(item) } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index 61c4da530..958b96a80 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -27,42 +27,115 @@ import ( // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // requeue items after failures without ending up in a hot-loop. -type DelayingInterface interface { - Interface +// +// Deprecated: use TypedDelayingInterface instead. +type DelayingInterface TypedDelayingInterface[any] + +// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type TypedDelayingInterface[T comparable] interface { + TypedInterface[T] // AddAfter adds an item to the workqueue after the indicated duration has passed - AddAfter(item interface{}, duration time.Duration) + AddAfter(item T, duration time.Duration) +} + +// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +// +// Deprecated: use TypedDelayingQueueConfig instead. +type DelayingQueueConfig = TypedDelayingQueueConfig[any] + +// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type TypedDelayingQueueConfig[T comparable] struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // Queue optionally allows injecting custom queue Interface instead of the default one. + Queue TypedInterface[T] } -// NewDelayingQueue constructs a new workqueue with delayed queuing ability +// NewDelayingQueue constructs a new workqueue with delayed queuing ability. +// NewDelayingQueue does not emit metrics. 
For use with a MetricsProvider, please use +// NewDelayingQueueWithConfig instead and specify a name. +// +// Deprecated: use TypedNewDelayingQueue instead. func NewDelayingQueue() DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, "") + return NewDelayingQueueWithConfig(DelayingQueueConfig{}) +} + +// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability. +// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use +// TypedNewDelayingQueueWithConfig instead and specify a name. +func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { + return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{}) +} + +// NewDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +// +// Deprecated: use TypedNewDelayingQueueWithConfig instead. +func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + return NewTypedDelayingQueueWithConfig[any](config) +} + +// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.Queue == nil { + config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider) } // NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to // inject custom queue Interface instead of the default one +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface { - return newDelayingQueue(clock.RealClock{}, q, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Queue: q, + }) } -// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability +// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability. +// Deprecated: Use NewDelayingQueueWithConfig instead. func NewNamedDelayingQueue(name string) DelayingInterface { - return NewDelayingQueueWithCustomClock(clock.RealClock{}, name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name}) } // NewDelayingQueueWithCustomClock constructs a new named workqueue -// with ability to inject real or fake clock for testing purposes +// with ability to inject real or fake clock for testing purposes. +// Deprecated: Use NewDelayingQueueWithConfig instead. 
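With the new `TypedDelayingQueueConfig` shown above, callers pick a key type and optionally a name for metrics instead of the old positional constructors. A small usage sketch, assuming a string-keyed queue; the queue name and item key are made up for illustration:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Named queues register workqueue metrics; unnamed ones do not.
	q := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
		Name: "policies", // illustrative name
	})
	defer q.ShutDown()

	q.AddAfter("policies/example", 50*time.Millisecond)

	// Get blocks until the delay has elapsed and the item moves into the queue.
	item, shutdown := q.Get()
	if !shutdown {
		fmt.Println("got", item)
		q.Done(item)
	}
}
```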
func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface { - return newDelayingQueue(clock, NewNamed(name), name) + return NewDelayingQueueWithConfig(DelayingQueueConfig{ + Name: name, + Clock: clock, + }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType { - ret := &delayingType{ - Interface: q, +func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { + ret := &delayingType[T]{ + TypedInterface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), + metrics: newRetryMetrics(name, provider), } go ret.waitingLoop() @@ -70,8 +143,8 @@ func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayin } // delayingType wraps an Interface and provides delayed re-enquing -type delayingType struct { - Interface +type delayingType[T comparable] struct { + TypedInterface[T] // clock tracks time for delayed firing clock clock.Clock @@ -148,16 +221,16 @@ func (pq waitForPriorityQueue) Peek() interface{} { // ShutDown stops the queue. After the queue drains, the returned shutdown bool // on Get() will be true. This method may be invoked more than once. -func (q *delayingType) ShutDown() { +func (q *delayingType[T]) ShutDown() { q.stopOnce.Do(func() { - q.Interface.ShutDown() + q.TypedInterface.ShutDown() close(q.stopCh) q.heartbeat.Stop() }) } // AddAfter adds the given item to the work queue after the given delay -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { +func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { // don't add if we're already shutting down if q.ShuttingDown() { return @@ -184,7 +257,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { const maxWait = 10 * time.Second // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
-func (q *delayingType) waitingLoop() { +func (q *delayingType[T]) waitingLoop() { defer utilruntime.HandleCrash() // Make a placeholder channel to use when there are no items in our list @@ -199,7 +272,7 @@ func (q *delayingType) waitingLoop() { waitingEntryByData := map[t]*waitFor{} for { - if q.Interface.ShuttingDown() { + if q.TypedInterface.ShuttingDown() { return } @@ -213,7 +286,7 @@ func (q *delayingType) waitingLoop() { } entry = heap.Pop(waitingForQueue).(*waitFor) - q.Add(entry.data) + q.Add(entry.data.(T)) delete(waitingEntryByData, entry.data) } @@ -242,7 +315,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } drained := false @@ -252,7 +325,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } default: drained = true diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index 4b0a69616..f012ccc55 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -244,13 +244,18 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu } } -func newRetryMetrics(name string) retryMetrics { +func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { var ret *defaultRetryMetrics if len(name) == 0 { return ret } + + if provider == nil { + provider = globalMetricsFactory.metricsProvider + } + return &defaultRetryMetrics{ - retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name), + retries: provider.NewRetriesMetric(name), } } diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index 6f7063269..ff715482c 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -23,35 +23,152 @@ import ( "k8s.io/utils/clock" ) -type Interface interface { - Add(item interface{}) +// Deprecated: Interface is deprecated, use TypedInterface instead. +type Interface TypedInterface[any] + +type TypedInterface[T comparable] interface { + Add(item T) Len() int - Get() (item interface{}, shutdown bool) - Done(item interface{}) + Get() (item T, shutdown bool) + Done(item T) ShutDown() ShutDownWithDrain() ShuttingDown() bool } +// Queue is the underlying storage for items. The functions below are always +// called from the same goroutine. +type Queue[T comparable] interface { + // Touch can be hooked when an existing item is added again. This may be + // useful if the implementation allows priority change for the given item. + Touch(item T) + // Push adds a new item. + Push(item T) + // Len tells the total number of items. + Len() int + // Pop retrieves an item. + Pop() (item T) +} + +// DefaultQueue is a slice based FIFO queue. +func DefaultQueue[T comparable]() Queue[T] { + return new(queue[T]) +} + +// queue is a slice which implements Queue. +type queue[T comparable] []T + +func (q *queue[T]) Touch(item T) {} + +func (q *queue[T]) Push(item T) { + *q = append(*q, item) +} + +func (q *queue[T]) Len() int { + return len(*q) +} + +func (q *queue[T]) Pop() (item T) { + item = (*q)[0] + + // The underlying array still exists and reference this object, so the object will not be garbage collected. 
+ (*q)[0] = *new(T) + *q = (*q)[1:] + + return item +} + +// QueueConfig specifies optional configurations to customize an Interface. +// Deprecated: use TypedQueueConfig instead. +type QueueConfig = TypedQueueConfig[any] + +type TypedQueueConfig[T comparable] struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock ability to inject real or fake clock for testing purposes. + Clock clock.WithTicker + + // Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue. + Queue Queue[T] +} + // New constructs a new work queue (see the package comment). +// +// Deprecated: use NewTyped instead. func New() *Type { - return NewNamed("") + return NewWithConfig(QueueConfig{ + Name: "", + }) +} + +// NewTyped constructs a new work queue (see the package comment). +func NewTyped[T comparable]() *Typed[T] { + return NewTypedWithConfig(TypedQueueConfig[T]{ + Name: "", + }) +} + +// NewWithConfig constructs a new workqueue with ability to +// customize different properties. +// +// Deprecated: use NewTypedWithConfig instead. +func NewWithConfig(config QueueConfig) *Type { + return NewTypedWithConfig(config) } +// NewTypedWithConfig constructs a new workqueue with ability to +// customize different properties. +func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] { + return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) +} + +// NewNamed creates a new named queue. +// Deprecated: Use NewWithConfig instead. func NewNamed(name string) *Type { - rc := clock.RealClock{} + return NewWithConfig(QueueConfig{ + Name: name, + }) +} + +// newQueueWithConfig constructs a new named workqueue +// with the ability to customize different properties for testing purposes +func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] { + var metricsFactory *queueMetricsFactory + if config.MetricsProvider != nil { + metricsFactory = &queueMetricsFactory{ + metricsProvider: config.MetricsProvider, + } + } else { + metricsFactory = &globalMetricsFactory + } + + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.Queue == nil { + config.Queue = DefaultQueue[T]() + } + return newQueue( - rc, - globalMetricsFactory.newQueueMetrics(name, rc), - defaultUnfinishedWorkUpdatePeriod, + config.Clock, + config.Queue, + metricsFactory.newQueueMetrics(config.Name, config.Clock), + updatePeriod, ) } -func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { - t := &Type{ +func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] { + t := &Typed[T]{ clock: c, - dirty: set{}, - processing: set{}, + queue: queue, + dirty: set[T]{}, + processing: set[T]{}, cond: sync.NewCond(&sync.Mutex{}), metrics: metrics, unfinishedWorkUpdatePeriod: updatePeriod, @@ -69,20 +186,23 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond // Type is a work queue (see the package comment). -type Type struct { +// Deprecated: Use Typed instead. +type Type = Typed[any] + +type Typed[t comparable] struct { // queue defines the order in which we will work on items. 
Every // element of queue should be in the dirty set and not in the // processing set. - queue []t + queue Queue[t] // dirty defines all of the items that need to be processed. - dirty set + dirty set[t] // Things that are currently being processed are in the processing set. // These things may be simultaneously in the dirty set. When we finish // processing something and remove it from this set, we'll check if // it's in the dirty set, and if so, add it to the queue. - processing set + processing set[t] cond *sync.Cond @@ -97,33 +217,38 @@ type Type struct { type empty struct{} type t interface{} -type set map[t]empty +type set[t comparable] map[t]empty -func (s set) has(item t) bool { +func (s set[t]) has(item t) bool { _, exists := s[item] return exists } -func (s set) insert(item t) { +func (s set[t]) insert(item t) { s[item] = empty{} } -func (s set) delete(item t) { +func (s set[t]) delete(item t) { delete(s, item) } -func (s set) len() int { +func (s set[t]) len() int { return len(s) } // Add marks item as needing processing. -func (q *Type) Add(item interface{}) { +func (q *Typed[T]) Add(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() if q.shuttingDown { return } if q.dirty.has(item) { + // the same item is added again before it is processed, call the Touch + // function if the queue cares about it (for e.g, reset its priority) + if !q.processing.has(item) { + q.queue.Touch(item) + } return } @@ -134,37 +259,34 @@ func (q *Type) Add(item interface{}) { return } - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } // Len returns the current queue length, for informational purposes only. You // shouldn't e.g. gate a call to Add() or Get() on Len() being a particular // value, that can't be synchronized properly. -func (q *Type) Len() int { +func (q *Typed[T]) Len() int { q.cond.L.Lock() defer q.cond.L.Unlock() - return len(q.queue) + return q.queue.Len() } // Get blocks until it can return an item to be processed. If shutdown = true, // the caller should end their goroutine. You must call Done with item when you // have finished processing it. -func (q *Type) Get() (item interface{}, shutdown bool) { +func (q *Typed[T]) Get() (item T, shutdown bool) { q.cond.L.Lock() defer q.cond.L.Unlock() - for len(q.queue) == 0 && !q.shuttingDown { + for q.queue.Len() == 0 && !q.shuttingDown { q.cond.Wait() } - if len(q.queue) == 0 { + if q.queue.Len() == 0 { // We must be shutting down. - return nil, true + return *new(T), true } - item = q.queue[0] - // The underlying array still exists and reference this object, so the object will not be garbage collected. - q.queue[0] = nil - q.queue = q.queue[1:] + item = q.queue.Pop() q.metrics.get(item) @@ -177,7 +299,7 @@ func (q *Type) Get() (item interface{}, shutdown bool) { // Done marks item as done processing, and if it has been marked as dirty again // while it was being processed, it will be re-added to the queue for // re-processing. -func (q *Type) Done(item interface{}) { +func (q *Typed[T]) Done(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -185,7 +307,7 @@ func (q *Type) Done(item interface{}) { q.processing.delete(item) if q.dirty.has(item) { - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } else if q.processing.len() == 0 { q.cond.Signal() @@ -194,9 +316,13 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. 
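The queue storage is now a pluggable `Queue[T]` behind `TypedQueueConfig.Queue`, with the slice-based `DefaultQueue` as the fallback. A toy sketch of swapping in a different backend, here a LIFO, purely to illustrate the interface; the `lifo` type and queue name are invented for this example:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// lifo is a toy Queue[string] that pops the most recently pushed item first.
type lifo []string

func (q *lifo) Touch(item string) {} // no priorities to refresh
func (q *lifo) Push(item string)  { *q = append(*q, item) }
func (q *lifo) Len() int          { return len(*q) }
func (q *lifo) Pop() (item string) {
	last := len(*q) - 1
	item = (*q)[last]
	*q = (*q)[:last]
	return item
}

func main() {
	q := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
		Name:  "lifo-demo", // illustrative
		Queue: &lifo{},
	})
	q.Add("first")
	q.Add("second")

	item, _ := q.Get() // "second" comes out first with the LIFO backend
	fmt.Println(item)
	q.Done(item)
	q.ShutDown()
}
```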
-func (q *Type) ShutDown() { - q.setDrain(false) - q.shutdown() +func (q *Typed[T]) ShutDown() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.drain = false + q.shuttingDown = true + q.cond.Broadcast() } // ShutDownWithDrain will cause q to ignore all new items added to it. As soon @@ -208,64 +334,27 @@ func (q *Type) ShutDown() { // indefinitely. It is, however, safe to call ShutDown after having called // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. -func (q *Type) ShutDownWithDrain() { - q.setDrain(true) - q.shutdown() - for q.isProcessing() && q.shouldDrain() { - q.waitForProcessing() - } -} - -// isProcessing indicates if there are still items on the work queue being -// processed. It's used to drain the work queue on an eventual shutdown. -func (q *Type) isProcessing() bool { - q.cond.L.Lock() - defer q.cond.L.Unlock() - return q.processing.len() != 0 -} - -// waitForProcessing waits for the worker goroutines to finish processing items -// and call Done on them. -func (q *Type) waitForProcessing() { +func (q *Typed[T]) ShutDownWithDrain() { q.cond.L.Lock() defer q.cond.L.Unlock() - // Ensure that we do not wait on a queue which is already empty, as that - // could result in waiting for Done to be called on items in an empty queue - // which has already been shut down, which will result in waiting - // indefinitely. - if q.processing.len() == 0 { - return - } - q.cond.Wait() -} -func (q *Type) setDrain(shouldDrain bool) { - q.cond.L.Lock() - defer q.cond.L.Unlock() - q.drain = shouldDrain -} - -func (q *Type) shouldDrain() bool { - q.cond.L.Lock() - defer q.cond.L.Unlock() - return q.drain -} - -func (q *Type) shutdown() { - q.cond.L.Lock() - defer q.cond.L.Unlock() + q.drain = true q.shuttingDown = true q.cond.Broadcast() + + for q.processing.len() != 0 && q.drain { + q.cond.Wait() + } } -func (q *Type) ShuttingDown() bool { +func (q *Typed[T]) ShuttingDown() bool { q.cond.L.Lock() defer q.cond.L.Unlock() return q.shuttingDown } -func (q *Type) updateUnfinishedWorkLoop() { +func (q *Typed[T]) updateUnfinishedWorkLoop() { t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) defer t.Stop() for range t.C() { diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 8321876ac..fe45afa5a 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -16,54 +16,132 @@ limitations under the License. package workqueue +import "k8s.io/utils/clock" + // RateLimitingInterface is an interface that rate limits items being added to the queue. -type RateLimitingInterface interface { - DelayingInterface +// +// Deprecated: Use TypedRateLimitingInterface instead. +type RateLimitingInterface TypedRateLimitingInterface[any] + +// TypedRateLimitingInterface is an interface that rate limits items being added to the queue. +type TypedRateLimitingInterface[T comparable] interface { + TypedDelayingInterface[T] // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok - AddRateLimited(item interface{}) + AddRateLimited(item T) // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. 
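The rewritten `ShutDown`/`ShutDownWithDrain` above fold the old helper methods into two lock-protected bodies without changing the documented semantics. A hedged sketch of the usual producer/worker pattern against the typed queue; the item key is illustrative:

```go
package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTyped[string]()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			fmt.Println("processing", item)
			q.Done(item) // Done must be called, or ShutDownWithDrain blocks forever
		}
	}()

	q.Add("default/example")
	// Blocks until items currently being processed have been Done'd,
	// then the worker's next Get reports shutdown.
	q.ShutDownWithDrain()
	wg.Wait()
}
```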
- Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many times the item was requeued - NumRequeues(item interface{}) int + NumRequeues(item T) int +} + +// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. +// +// Deprecated: Use TypedRateLimitingQueueConfig instead. +type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any] + +// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface. +type TypedRateLimitingQueueConfig[T comparable] struct { + // Name for the queue. If unnamed, the metrics will not be registered. + Name string + + // MetricsProvider optionally allows specifying a metrics provider to use for the queue + // instead of the global provider. + MetricsProvider MetricsProvider + + // Clock optionally allows injecting a real or fake clock for testing purposes. + Clock clock.WithTicker + + // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. + DelayingQueue TypedDelayingInterface[T] } // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. +// NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use +// NewRateLimitingQueueWithConfig instead and specify a name. +// +// Deprecated: Use NewTypedRateLimitingQueue instead. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewDelayingQueue(), - rateLimiter: rateLimiter, + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) +} + +// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +// NewTypedRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use +// NewTypedRateLimitingQueueWithConfig instead and specify a name. +func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{}) +} + +// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +// +// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead. +func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, config) +} + +// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. 
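`NewTypedRateLimitingQueue` combines a typed delaying queue with a `TypedRateLimiter[T]`, as the constructors above show. A sketch of the standard retry loop, using `DefaultTypedItemBasedRateLimiter` from earlier in this diff; `process` and the key are placeholders:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTypedRateLimitingQueue(
		workqueue.DefaultTypedItemBasedRateLimiter[string](),
	)
	defer q.ShutDown()

	q.AddRateLimited("opa/policy") // illustrative key

	item, shutdown := q.Get()
	if shutdown {
		return
	}
	if err := process(item); err != nil {
		q.AddRateLimited(item) // retry with per-item backoff
		fmt.Println("requeued", item, "attempts so far:", q.NumRequeues(item))
	} else {
		q.Forget(item) // reset the per-item backoff on success
	}
	q.Done(item)
}

// process stands in for real reconciliation logic.
func process(item string) error { return nil }
```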
+func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] { + if config.Clock == nil { + config.Clock = clock.RealClock{} + } + + if config.DelayingQueue == nil { + config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{ + Name: config.Name, + MetricsProvider: config.MetricsProvider, + Clock: config.Clock, + }) + } + + return &rateLimitingType[T]{ + TypedDelayingInterface: config.DelayingQueue, + rateLimiter: rateLimiter, } } +// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { - return &rateLimitingType{ - DelayingInterface: NewNamedDelayingQueue(name), - rateLimiter: rateLimiter, - } + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + Name: name, + }) +} + +// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability +// with the option to inject a custom delaying queue instead of the default one. +// Deprecated: Use NewRateLimitingQueueWithConfig instead. +func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface { + return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{ + DelayingQueue: di, + }) } // rateLimitingType wraps an Interface and provides rateLimited re-enquing -type rateLimitingType struct { - DelayingInterface +type rateLimitingType[T comparable] struct { + TypedDelayingInterface[T] - rateLimiter RateLimiter + rateLimiter TypedRateLimiter[T] } // AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok -func (q *rateLimitingType) AddRateLimited(item interface{}) { - q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +func (q *rateLimitingType[T]) AddRateLimited(item T) { + q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } -func (q *rateLimitingType) NumRequeues(item interface{}) int { +func (q *rateLimitingType[T]) NumRequeues(item T) int { return q.rateLimiter.NumRequeues(item) } -func (q *rateLimitingType) Forget(item interface{}) { +func (q *rateLimitingType[T]) Forget(item T) { q.rateLimiter.Forget(item) } diff --git a/vendor/k8s.io/klog/v2/.golangci.yaml b/vendor/k8s.io/klog/v2/.golangci.yaml new file mode 100644 index 000000000..0d77d65f0 --- /dev/null +++ b/vendor/k8s.io/klog/v2/.golangci.yaml @@ -0,0 +1,6 @@ +linters: + disable-all: true + enable: # sorted alphabetical + - gofmt + - misspell + - revive diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index ad5063fdf..7500475a6 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,19 +1,16 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 + - harshanarayana + - mengjiao-liu - pohly - - yagonobre - - vincepri - - detiber approvers: - dims + - pohly - thockin - - justinsb - - tallclair - - piosz +emeritus_approvers: - brancz + - justinsb - lavalamp + - piosz - serathius + - tallclair diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 64d29622e..d45cbe172 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -23,6 +23,20 @@ Historical context is available here: * 
https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ +## Release versioning + +Semantic versioning is used in this repository. It contains several Go modules +with different levels of stability: +- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags +- `examples` - no stable API, no tags, no intention to ever stabilize + +Exempt from the API stability guarantee are items (packages, functions, etc.) +which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those +may still change in incompatible ways or get removed entirely. This can only +be used for code that is used in tests to avoid situations where non-test +code from two different Kubernetes dependencies depends on incompatible +releases of klog because an experimental API was changed. + ---- How to use klog @@ -32,6 +46,7 @@ How to use klog - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) +- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog). **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. @@ -85,7 +100,7 @@ The comment from glog.go introduces the ideas: glog.Fatalf("Initialization failed: %s", err) - See the documentation for the V function for an explanation + See the documentation of the V function for an explanation of these examples: if glog.V(2) { diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go new file mode 100644 index 000000000..005513f2a --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -0,0 +1,212 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + + "github.com/go-logr/logr" +) + +// This file provides the implementation of +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging +// +// SetLogger and ClearLogger were originally added to klog.go and got moved +// here. Contextual logging adds a way to retrieve a Logger for direct logging +// without the logging calls in klog.go. +// +// The global variables are expected to be modified only during sequential +// parts of a program (init, serial tests) and therefore are not protected by +// mutex locking. + +var ( + // klogLogger is used as fallback for logging through the normal klog code + // when no Logger is set. 
+ klogLogger logr.Logger = logr.New(&klogger{}) +) + +// SetLogger sets a Logger implementation that will be used as backing +// implementation of the traditional klog log calls. klog will do its own +// verbosity checks before calling logger.V().Info. logger.Error is always +// called, regardless of the klog verbosity settings. +// +// If set, all log lines will be suppressed from the regular output, and +// redirected to the logr implementation. +// Use as: +// +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func SetLogger(logger logr.Logger) { + SetLoggerWithOptions(logger) +} + +// SetLoggerWithOptions is a more flexible version of SetLogger. Without +// additional options, it behaves exactly like SetLogger. By passing +// ContextualLogger(true) as option, it can be used to set a logger that then +// will also get called directly by applications which retrieve it via +// FromContext, Background, or TODO. +// +// Supporting direct calls is recommended because it avoids the overhead of +// routing log entries through klogr into klog and then into the actual Logger +// backend. +func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { + logging.loggerOptions = loggerOptions{} + for _, opt := range opts { + opt(&logging.loggerOptions) + } + logging.logger = &logWriter{ + Logger: logger, + writeKlogBuffer: logging.loggerOptions.writeKlogBuffer, + } +} + +// ContextualLogger determines whether the logger passed to +// SetLoggerWithOptions may also get called directly. Such a logger cannot rely +// on verbosity checking in klog. +func ContextualLogger(enabled bool) LoggerOption { + return func(o *loggerOptions) { + o.contextualLogger = enabled + } +} + +// FlushLogger provides a callback for flushing data buffered by the logger. +func FlushLogger(flush func()) LoggerOption { + return func(o *loggerOptions) { + o.flush = flush + } +} + +// WriteKlogBuffer sets a callback that will be invoked by klog to write output +// produced by non-structured log calls like Infof. +// +// The buffer will contain exactly the same data that klog normally would write +// into its own output stream(s). In particular this includes the header, if +// klog is configured to write one. The callback then can divert that data into +// its own output streams. The buffer may or may not end in a line break. +// +// Without such a callback, klog will call the logger's Info or Error method +// with just the message string (i.e. no header). +func WriteKlogBuffer(write func([]byte)) LoggerOption { + return func(o *loggerOptions) { + o.writeKlogBuffer = write + } +} + +// LoggerOption implements the functional parameter paradigm for +// SetLoggerWithOptions. +type LoggerOption func(o *loggerOptions) + +type loggerOptions struct { + contextualLogger bool + flush func() + writeKlogBuffer func([]byte) +} + +// logWriter combines a logger (always set) with a write callback (optional). +type logWriter struct { + Logger + writeKlogBuffer func([]byte) +} + +// ClearLogger removes a backing Logger implementation if one was set earlier +// with SetLogger. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. 
+func ClearLogger() { + logging.logger = nil + logging.loggerOptions = loggerOptions{} +} + +// EnableContextualLogging controls whether contextual logging is enabled. +// By default it is enabled. When disabled, FromContext avoids looking up +// the logger in the context and always returns the global logger. +// LoggerWithValues, LoggerWithName, and NewContext become no-ops +// and return their input logger respectively context. This may be useful +// to avoid the additional overhead for contextual logging. +// +// This must be called during initialization before goroutines are started. +func EnableContextualLogging(enabled bool) { + logging.contextualLoggingEnabled = enabled +} + +// FromContext retrieves a logger set by the caller or, if not set, +// falls back to the program's global logger (a Logger instance or klog +// itself). +func FromContext(ctx context.Context) Logger { + if logging.contextualLoggingEnabled { + if logger, err := logr.FromContext(ctx); err == nil { + return logger + } + } + + return Background() +} + +// TODO can be used as a last resort by code that has no means of +// receiving a logger from its caller. FromContext or an explicit logger +// parameter should be used instead. +func TODO() Logger { + return Background() +} + +// Background retrieves the fallback logger. It should not be called before +// that logger was initialized by the program and not by code that should +// better receive a logger via its parameters. TODO can be used as a temporary +// solution for such code. +func Background() Logger { + if logging.loggerOptions.contextualLogger { + // Is non-nil because logging.loggerOptions.contextualLogger is + // only true if a logger was set. + return logging.logger.Logger + } + + return klogLogger +} + +// LoggerWithValues returns logger.WithValues(...kv) when +// contextual logging is enabled, otherwise the logger. +func LoggerWithValues(logger Logger, kv ...interface{}) Logger { + if logging.contextualLoggingEnabled { + return logger.WithValues(kv...) + } + return logger +} + +// LoggerWithName returns logger.WithName(name) when contextual logging is +// enabled, otherwise the logger. +func LoggerWithName(logger Logger, name string) Logger { + if logging.contextualLoggingEnabled { + return logger.WithName(name) + } + return logger +} + +// NewContext returns logr.NewContext(ctx, logger) when +// contextual logging is enabled, otherwise ctx. +func NewContext(ctx context.Context, logger Logger) context.Context { + if logging.contextualLoggingEnabled { + return logr.NewContext(ctx, logger) + } + return ctx +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go b/vendor/k8s.io/klog/v2/contextual_slog.go similarity index 55% rename from vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go rename to vendor/k8s.io/klog/v2/contextual_slog.go index 5c5f70d25..d3b562521 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go +++ b/vendor/k8s.io/klog/v2/contextual_slog.go @@ -1,3 +1,6 @@ +//go:build go1.21 +// +build go1.21 + /* Copyright 2021 The Kubernetes Authors. @@ -14,15 +17,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1 +package klog import ( - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/client-go/pkg/apis/clientauthentication" + "log/slog" + + "github.com/go-logr/logr" ) -func Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - // This conversion intentionally omits the Response field, which were only - // supported in v1alpha1. - return autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in, out, s) +// SetSlogLogger reconfigures klog to log through the slog logger. The logger must not be nil. +func SetSlogLogger(logger *slog.Logger) { + SetLoggerWithOptions(logr.FromSlogHandler(logger.Handler()), ContextualLogger(true)) } diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go new file mode 100644 index 000000000..320a14772 --- /dev/null +++ b/vendor/k8s.io/klog/v2/exit.go @@ -0,0 +1,69 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package klog + +import ( + "fmt" + "os" + "time" +) + +var ( + + // ExitFlushTimeout is the timeout that klog has traditionally used during + // calls like Fatal or Exit when flushing log data right before exiting. + // Applications that replace those calls and do not have some specific + // requirements like "exit immediately" can use this value as parameter + // for FlushAndExit. + // + // Can be set for testing purpose or to change the application's + // default. + ExitFlushTimeout = 10 * time.Second + + // OsExit is the function called by FlushAndExit to terminate the program. + // + // Can be set for testing purpose or to change the application's + // default behavior. Note that the function should not simply return + // because callers of functions like Fatal will not expect that. + OsExit = os.Exit +) + +// FlushAndExit flushes log data for a certain amount of time and then calls +// os.Exit. Combined with some logging call it provides a replacement for +// traditional calls like Fatal or Exit. +func FlushAndExit(flushTimeout time.Duration, exitCode int) { + timeoutFlush(flushTimeout) + OsExit(exitCode) +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when klog.Fatal is called from a hook that holds +// a lock. Flushing also might take too long. 
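The new `SetSlogLogger` (Go 1.21+) together with the contextual-logging helpers added in `contextual.go` lets a program route klog output through `log/slog` and pass loggers via `context.Context`. A minimal sketch; the handler choice, logger name, and log message are picked only for illustration:

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"k8s.io/klog/v2"
)

func main() {
	// Route all klog output, including contextual loggers, through slog.
	klog.SetSlogLogger(slog.New(slog.NewJSONHandler(os.Stderr, nil)))
	defer klog.ClearLogger()

	logger := klog.Background().WithName("kube-mgmt") // name is illustrative
	ctx := klog.NewContext(context.Background(), logger)

	// Somewhere deeper in the call chain:
	klog.FromContext(ctx).Info("syncing", "kind", "ConfigMap")
}
```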
+func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) + } +} diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 000000000..63995ca6d --- /dev/null +++ b/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +// Format wraps a value of an arbitrary type and implement fmt.Stringer and +// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog +// returns the original value with a type that has no special methods, in +// particular no MarshalLog or MarshalJSON. +// +// Wrapping values like that is useful when the value has a broken +// implementation of these special functions (for example, a type which +// inherits String from TypeMeta, but then doesn't re-implement String) or the +// implementation produces output that is less readable or unstructured (for +// example, the generated String functions for Kubernetes API types). +func Format(obj interface{}) interface{} { + return formatAny{Object: obj} +} + +type formatAny struct { + Object interface{} +} + +func (f formatAny) String() string { + var buffer strings.Builder + encoder := json.NewEncoder(&buffer) + encoder.SetIndent("", " ") + if err := encoder.Encode(&f.Object); err != nil { + return fmt.Sprintf("error marshaling %T to JSON: %v", f, err) + } + return buffer.String() +} + +func (f formatAny) MarshalLog() interface{} { + // Returning a pointer to a pointer ensures that zapr doesn't find a + // fmt.Stringer or logr.Marshaler when it checks the type of the + // value. It then falls back to reflection, which dumps the value being + // pointed to (JSON doesn't have pointers). + ptr := &f.Object + return &ptr +} + +var _ fmt.Stringer = formatAny{} +var _ logr.Marshaler = formatAny{} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go b/vendor/k8s.io/klog/v2/imports.go similarity index 50% rename from vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go rename to vendor/k8s.io/klog/v2/imports.go index 572e049f8..602c3ed9e 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/conversion.go +++ b/vendor/k8s.io/klog/v2/imports.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,14 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package klog import ( - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/client-go/pkg/apis/clientauthentication" + "github.com/go-logr/logr" ) -func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - // This conversion intentionally omits the Cluster field which is only supported in newer versions. - return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) -} +// The reason for providing these aliases is to allow code to work with logr +// without directly importing it. + +// Logger in this package is exactly the same as logr.Logger. +type Logger = logr.Logger + +// LogSink in this package is exactly the same as logr.LogSink. +type LogSink = logr.LogSink + +// Runtimeinfo in this package is exactly the same as logr.RuntimeInfo. +type RuntimeInfo = logr.RuntimeInfo + +var ( + // New is an alias for logr.New. + New = logr.New +) diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go new file mode 100644 index 000000000..46de00fb0 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -0,0 +1,184 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buffer provides a cache for byte.Buffer instances that can be reused +// to avoid frequent allocation and deallocation. It also has utility code +// for log header formatting that use these buffers. +package buffer + +import ( + "bytes" + "os" + "sync" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +var ( + // Pid is inserted into log headers. Can be overridden for tests. + Pid = os.Getpid() + + // Time, if set, will be used instead of the actual current time. + Time *time.Time +) + +// Buffer holds a single byte.Buffer for reuse. The zero value is ready for +// use. It also provides some helper methods for output formatting. +type Buffer struct { + bytes.Buffer + Tmp [64]byte // temporary byte array for creating headers. +} + +var buffers = sync.Pool{ + New: func() interface{} { + return new(Buffer) + }, +} + +// GetBuffer returns a new, ready-to-use buffer. +func GetBuffer() *Buffer { + b := buffers.Get().(*Buffer) + b.Reset() + return b +} + +// PutBuffer returns a buffer to the free list. +func PutBuffer(b *Buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death, without relying on + // sync.Pool behavior. The documentation implies that items may + // get deallocated while stored there ("If the Pool holds the + // only reference when this [= be removed automatically] + // happens, the item might be deallocated."), but + // https://github.com/golang/go/issues/23199 leans more towards + // having such a size limit. + return + } + + buffers.Put(b) +} + +// Some custom tiny helper functions to print the log header efficiently. 
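`klog.Format`, added in `format.go` above, wraps a value so text output gets pretty-printed JSON while structured backends still receive the raw value. A short sketch, assuming klog's `InfoS` structured call and an invented `policyStatus` type:

```go
package main

import (
	"k8s.io/klog/v2"
)

type policyStatus struct {
	Name   string `json:"name"`
	Loaded bool   `json:"loaded"`
}

func main() {
	status := policyStatus{Name: "example.rego", Loaded: true}

	// Plain value: logged with its default formatting.
	klog.InfoS("policy synced", "status", status)

	// Wrapped in Format: pretty-printed JSON in text output, and the value
	// itself (without String/MarshalLog interference) for structured sinks.
	klog.InfoS("policy synced", "status", klog.Format(status))
}
```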
+ +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i]. +func (buf *Buffer) twoDigits(i, d int) { + buf.Tmp[i+1] = digits[d%10] + d /= 10 + buf.Tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.Tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *Buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.Tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.Tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i]. +func (buf *Buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.Tmp) + for { + j-- + buf.Tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.Tmp[i:], buf.Tmp[j:]) +} + +// FormatHeader formats a log header using the provided file name and line number +// and writes it into the buffer. +func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ' ' + buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID + buf.Tmp[29] = ' ' + buf.Write(buf.Tmp[:30]) + buf.WriteString(file) + buf.Tmp[0] = ':' + n := buf.someDigits(1, line) + buf.Tmp[n+1] = ']' + buf.Tmp[n+2] = ' ' + buf.Write(buf.Tmp[:n+3]) +} + +// SprintHeader formats a log header and returns a string. This is a simpler +// version of FormatHeader for use in ktesting. +func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ']' + return string(buf.Tmp[:22]) +} diff --git a/vendor/k8s.io/klog/v2/internal/clock/README.md b/vendor/k8s.io/klog/v2/internal/clock/README.md new file mode 100644 index 000000000..03d692c8f --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/README.md @@ -0,0 +1,7 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. + +This is a copy of k8s.io/utils/clock. We have to copy it to avoid a circular +dependency (k8s.io/klog -> k8s.io/utils -> k8s.io/klog). 
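Because the workqueue configs above accept a `clock.WithTicker`, tests can fast-forward delays instead of sleeping. A sketch assuming the `FakeClock` from `k8s.io/utils/clock/testing` (the fake clock client-go's own tests rely on); the item key and durations are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	// Inject a fake clock so the 5-second delay can be advanced programmatically.
	fakeClock := testingclock.NewFakeClock(time.Now())
	q := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
		Clock: fakeClock,
	})
	defer q.ShutDown()

	q.AddAfter("default/slow-item", 5*time.Second)

	fakeClock.Step(6 * time.Second) // make the delayed item ready without waiting

	item, _ := q.Get() // blocks only until the background loop moves the item over
	fmt.Println("ready:", item)
	q.Done(item)
}
```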
diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go new file mode 100644 index 000000000..cc11bb480 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -0,0 +1,161 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. + // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. + NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ Clock = RealClock{} + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). 
+func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go new file mode 100644 index 000000000..f27bd1447 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go @@ -0,0 +1,42 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbg provides some helper code for call traces. +package dbg + +import ( + "runtime" +) + +// Stacks is a wrapper for runtime.Stack that attempts to recover the data for +// all goroutines or the calling one. +func Stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go new file mode 100644 index 000000000..d1a4751c9 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -0,0 +1,292 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + + "github.com/go-logr/logr" +) + +type textWriter interface { + WriteText(*bytes.Buffer) +} + +// WithValues implements LogSink.WithValues. The old key/value pairs are +// assumed to be well-formed, the new ones are checked and padded if +// necessary. It returns a new slice. +func WithValues(oldKV, newKV []interface{}) []interface{} { + if len(newKV) == 0 { + return oldKV + } + newLen := len(oldKV) + len(newKV) + hasMissingValue := newLen%2 != 0 + if hasMissingValue { + newLen++ + } + // The new LogSink must have its own slice. + kv := make([]interface{}, 0, newLen) + kv = append(kv, oldKV...) + kv = append(kv, newKV...) + if hasMissingValue { + kv = append(kv, missingValue) + } + return kv +} + +// MergeKVs deduplicates elements provided in two key/value slices. +// +// Keys in each slice are expected to be unique, so duplicates can only occur +// when the first and second slice contain the same key. When that happens, the +// key/value pair from the second slice is used. The first slice must be well-formed +// (= even key/value pairs). The second one may have a missing value, in which +// case the special "missing value" is added to the result. +func MergeKVs(first, second []interface{}) []interface{} { + maxLength := len(first) + (len(second)+1)/2*2 + if maxLength == 0 { + // Nothing to do at all. + return nil + } + + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + return second + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + merged := make([]interface{}, 0, maxLength) + for i := 0; i+1 < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue + } + merged = append(merged, key, first[i+1]) + } + merged = append(merged, second...) + if len(merged)%2 != 0 { + merged = append(merged, missingValue) + } + return merged +} + +type Formatter struct { + AnyToStringHook AnyToStringFunc +} + +type AnyToStringFunc func(v interface{}) string + +// MergeKVsInto is a variant of MergeKVs which directly formats the key/value +// pairs into a buffer. +func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + if len(first) == 0 && len(second) == 0 { + // Nothing to do at all. + return + } + + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + for i := 0; i < len(second); i += 2 { + f.KVFormat(b, second[i], second[i+1]) + } + return + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. 
The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + for i := 0; i < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue + } + f.KVFormat(b, key, first[i+1]) + } + // Round down. + l := len(second) + l = l / 2 * 2 + for i := 1; i < l; i += 2 { + f.KVFormat(b, second[i-1], second[i]) + } + if len(second)%2 == 1 { + f.KVFormat(b, second[len(second)-1], missingValue) + } +} + +func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + Formatter{}.MergeAndFormatKVs(b, first, second) +} + +const missingValue = "(MISSING)" + +// KVListFormat serializes all key/value pairs into the provided buffer. +// A space gets inserted before the first pair and between each pair. +func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + for i := 0; i < len(keysAndValues); i += 2 { + var v interface{} + k := keysAndValues[i] + if i+1 < len(keysAndValues) { + v = keysAndValues[i+1] + } else { + v = missingValue + } + f.KVFormat(b, k, v) + } +} + +func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + Formatter{}.KVListFormat(b, keysAndValues...) +} + +func KVFormat(b *bytes.Buffer, k, v interface{}) { + Formatter{}.KVFormat(b, k, v) +} + +// formatAny is the fallback formatter for a value. It supports a hook (for +// example, for YAML encoding) and itself uses JSON encoding. +func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { + b.WriteRune('=') + if f.AnyToStringHook != nil { + b.WriteString(f.AnyToStringHook(v)) + return + } + formatAsJSON(b, v) +} + +func formatAsJSON(b *bytes.Buffer, v interface{}) { + encoder := json.NewEncoder(b) + l := b.Len() + if err := encoder.Encode(v); err != nil { + // This shouldn't happen. We discard whatever the encoder + // wrote and instead dump an error string. + b.Truncate(l) + b.WriteString(fmt.Sprintf(`"<internal error: %v>"`, err)) + return + } + // Remove trailing newline. + b.Truncate(b.Len() - 1) +} + +// StringerToString converts a Stringer to a string, +// handling panics if they occur. +func StringerToString(s fmt.Stringer) (ret string) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = s.String() + return +} + +// MarshalerToValue invokes a marshaler and catches +// panics. +func MarshalerToValue(m logr.Marshaler) (ret interface{}) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = m.MarshalLog() + return +} + +// ErrorToString converts an error to a string, +// handling panics if they occur. +func ErrorToString(err error) (ret string) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = err.Error() + return +} + +func writeTextWriterValue(b *bytes.Buffer, v textWriter) { + b.WriteByte('=') + defer func() { + if err := recover(); err != nil { + fmt.Fprintf(b, `"<panic: %s>"`, err) + } + }() + v.WriteText(b) +} + +func writeStringValue(b *bytes.Buffer, v string) { + data := []byte(v) + index := bytes.IndexByte(data, '\n') + if index == -1 { + b.WriteByte('=') + // Simple string, quote quotation marks and non-printable characters. 
+ b.WriteString(strconv.Quote(v)) + return + } + + // Complex multi-line string, show as-is with indention like this: + // I... "hello world" key=< + // line 1 + // line 2 + // > + // + // Tabs indent the lines of the value while the end of string delimiter + // is indented with a space. That has two purposes: + // - visual difference between the two for a human reader because indention + // will be different + // - no ambiguity when some value line starts with the end delimiter + // + // One downside is that the output cannot distinguish between strings that + // end with a line break and those that don't because the end delimiter + // will always be on the next line. + b.WriteString("=<\n") + for index != -1 { + b.WriteByte('\t') + b.Write(data[0 : index+1]) + data = data[index+1:] + index = bytes.IndexByte(data, '\n') + } + if len(data) == 0 { + // String ended with line break, don't add another. + b.WriteString(" >") + } else { + // No line break at end of last line, write rest of string and + // add one. + b.WriteByte('\t') + b.Write(data) + b.WriteString("\n >") + } +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go new file mode 100644 index 000000000..d9c7d1546 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -0,0 +1,97 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). 
+ switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go new file mode 100644 index 000000000..89acf9772 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go @@ -0,0 +1,155 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "log/slog" + "strconv" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. 
In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + // + // slog.LogValuer does not need to be handled here because the handler will + // already have resolved such special values to the final value for logging. + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case slog.Value: + // This must come before fmt.Stringer because slog.Value implements + // fmt.Stringer, but does not produce the output that we want. + b.WriteByte('=') + generateJSON(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case slog.LogValuer: + value := slog.AnyValue(v).Resolve() + if value.Kind() == slog.KindString { + writeStringValue(b, value.String()) + } else { + b.WriteByte('=') + generateJSON(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} + +// generateJSON has the same preference for plain strings as KVFormat. +// In contrast to KVFormat it always produces valid JSON with no line breaks. +func generateJSON(b *bytes.Buffer, v interface{}) { + switch v := v.(type) { + case slog.Value: + switch v.Kind() { + case slog.KindGroup: + // Format as a JSON group. We must not involve f.AnyToStringHook (if there is any), + // because there is no guarantee that it produces valid JSON. + b.WriteByte('{') + for i, attr := range v.Group() { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(strconv.Quote(attr.Key)) + b.WriteByte(':') + generateJSON(b, attr.Value) + } + b.WriteByte('}') + case slog.KindLogValuer: + generateJSON(b, v.Resolve()) + default: + // Peel off the slog.Value wrapper and format the actual value. 
+ generateJSON(b, v.Any()) + } + case fmt.Stringer: + b.WriteString(strconv.Quote(StringerToString(v))) + case logr.Marshaler: + generateJSON(b, MarshalerToValue(v)) + case slog.LogValuer: + generateJSON(b, slog.AnyValue(v).Resolve().Any()) + case string: + b.WriteString(strconv.Quote(v)) + case error: + b.WriteString(strconv.Quote(v.Error())) + default: + formatAsJSON(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go new file mode 100644 index 000000000..30fa1834f --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go @@ -0,0 +1,58 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package severity provides definitions for klog severity (info, warning, ...) +package severity + +import ( + "strings" +) + +// severity identifies the sort of log: info, warning etc. The binding to flag.Value +// is handled in klog.go +type Severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + InfoLog Severity = iota + WarningLog + ErrorLog + FatalLog + NumSeverity = 4 +) + +// Char contains one shortcut letter per severity level. +const Char = "IWEF" + +// Name contains one name per severity level. +var Name = []string{ + InfoLog: "INFO", + WarningLog: "WARNING", + ErrorLog: "ERROR", + FatalLog: "FATAL", +} + +// ByName looks up a severity level by name. +func ByName(s string) (Severity, bool) { + s = strings.ToUpper(s) + for i, name := range Name { + if name == s { + return Severity(i), true + } + } + return 0, false +} diff --git a/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go new file mode 100644 index 000000000..21f1697d0 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sloghandler + +import ( + "context" + "log/slog" + "runtime" + "strings" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error { + now := record.Time + if now.IsZero() { + // This format doesn't support printing entries without a time. + now = time.Now() + } + + // slog has numeric severity levels, with 0 as default "info", negative for debugging, and + // positive with some pre-defined levels for more important. Those ranges get mapped to + // the corresponding klog levels where possible, with "info" the default that is used + // also for negative debug levels. + level := record.Level + s := severity.InfoLog + switch { + case level >= slog.LevelError: + s = severity.ErrorLog + case level >= slog.LevelWarn: + s = severity.WarningLog + } + + var file string + var line int + if record.PC != 0 { + // Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70 + fs := runtime.CallersFrames([]uintptr{record.PC}) + f, _ := fs.Next() + if f.File != "" { + file = f.File + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + line = f.Line + } + } else { + file = "???" + line = 1 + } + + kvList := make([]interface{}, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = appendAttr(groups, kvList, attr) + return true + }) + + printWithInfos(file, line, now, nil, s, record.Message, kvList) + return nil +} + +func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} { + kvList := make([]interface{}, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = appendAttr(groups, kvList, attr) + } + return kvList +} + +func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} { + var key string + if groups != "" { + key = groups + "." + attr.Key + } else { + key = attr.Key + } + return append(kvList, key, attr.Value) +} diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go new file mode 100644 index 000000000..786af74bf --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -0,0 +1,212 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package klog + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + "github.com/go-logr/logr" +) + +// ObjectRef references a kubernetes object +type ObjectRef struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` +} + +func (ref ObjectRef) String() string { + if ref.Namespace != "" { + var builder strings.Builder + builder.Grow(len(ref.Namespace) + len(ref.Name) + 1) + builder.WriteString(ref.Namespace) + builder.WriteRune('/') + builder.WriteString(ref.Name) + return builder.String() + } + return ref.Name +} + +func (ref ObjectRef) WriteText(out *bytes.Buffer) { + out.WriteRune('"') + ref.writeUnquoted(out) + out.WriteRune('"') +} + +func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) { + if ref.Namespace != "" { + out.WriteString(ref.Namespace) + out.WriteRune('/') + } + out.WriteString(ref.Name) +} + +// MarshalLog ensures that loggers with support for structured output will log +// as a struct by removing the String method via a custom type. +func (ref ObjectRef) MarshalLog() interface{} { + type or ObjectRef + return or(ref) +} + +var _ logr.Marshaler = ObjectRef{} + +// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +// this interface may expand in the future, but will always be a subset of the +// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +type KMetadata interface { + GetName() string + GetNamespace() string +} + +// KObj returns ObjectRef from ObjectMeta +func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + + return ObjectRef{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// KRef returns ObjectRef from name and namespace +func KRef(namespace, name string) ObjectRef { + return ObjectRef{ + Name: name, + Namespace: namespace, + } +} + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +// +// DEPRECATED: Use KObjSlice instead, it has better performance. +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) + if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} + +// KObjSlice takes a slice of objects that implement the KMetadata interface +// and returns an object that gets logged as a slice of ObjectRef values or a +// string containing those values, depending on whether the logger prefers text +// output or structured output. +// +// An error string is logged when KObjSlice is not passed a suitable slice. +// +// Processing of the argument is delayed until the value actually gets logged, +// in contrast to KObjs where that overhead is incurred regardless of whether +// the result is needed. 
+func KObjSlice(arg interface{}) interface{} { + return kobjSlice{arg: arg} +} + +type kobjSlice struct { + arg interface{} +} + +var _ fmt.Stringer = kobjSlice{} +var _ logr.Marshaler = kobjSlice{} + +func (ks kobjSlice) String() string { + objectRefs, errStr := ks.process() + if errStr != "" { + return errStr + } + return fmt.Sprintf("%v", objectRefs) +} + +func (ks kobjSlice) MarshalLog() interface{} { + objectRefs, errStr := ks.process() + if errStr != "" { + return errStr + } + return objectRefs +} + +func (ks kobjSlice) process() (objs []interface{}, err string) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as nil. + return nil, "" + case reflect.Slice: + // Okay, handle below. + default: + return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg) + } + objectRefs := make([]interface{}, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + item := s.Index(i).Interface() + if item == nil { + objectRefs = append(objectRefs, nil) + } else if v, ok := item.(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item) + } + } + return objectRefs, "" +} + +var nilToken = []byte("null") + +func (ks kobjSlice) WriteText(out *bytes.Buffer) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as null. + out.Write(nilToken) + return + case reflect.Slice: + // Okay, handle below. + default: + fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg) + return + } + out.Write([]byte{'['}) + defer out.Write([]byte{']'}) + for i := 0; i < s.Len(); i++ { + if i > 0 { + out.Write([]byte{','}) + } + item := s.Index(i).Interface() + if item == nil { + out.Write(nilToken) + } else if v, ok := item.(KMetadata); ok { + KObj(v).WriteText(out) + } else { + fmt.Fprintf(out, `"<KObjSlice needs a slice of values implementing KMetadata, got type %T>"`, item) + return + } + } +} diff --git a/vendor/k8s.io/klog/v2/k8s_references_slog.go b/vendor/k8s.io/klog/v2/k8s_references_slog.go new file mode 100644 index 000000000..5522c84c7 --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references_slog.go @@ -0,0 +1,39 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "log/slog" +) + +func (ref ObjectRef) LogValue() slog.Value { + if ref.Namespace != "" { + return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace)) + } + return slog.GroupValue(slog.String("name", ref.Name)) +} + +var _ slog.LogValuer = ObjectRef{} + +func (ks kobjSlice) LogValue() slog.Value { + return slog.AnyValue(ks.MarshalLog()) +} + +var _ slog.LogValuer = kobjSlice{} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 45efbb075..47ec9466a 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -14,9 +14,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. 
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// Package klog contains the following functionality: +// +// - output routing as defined via command line flags ([InitFlags]) +// - log formatting as text, either with a single, unstructured string ([Info], [Infof], etc.) +// or as a structured log entry with message and key/value pairs ([InfoS], etc.) +// - management of a go-logr [Logger] ([SetLogger], [Background], [TODO]) +// - helper functions for logging values ([Format]) and managing the state of klog ([CaptureState], [State.Restore]) +// - wrappers for [logr] APIs for contextual logging where the wrappers can +// be turned into no-ops ([EnableContextualLogging], [NewContext], [FromContext], +// [LoggerWithValues], [LoggerWithName]); if the ability to turn off +// contextual logging is not needed, then go-logr can also be used directly +// - type aliases for go-logr types to simplify imports in code which uses both (e.g. [Logger]) +// - [k8s.io/klog/v2/textlogger]: a logger which uses the same formatting as klog log with +// simpler output routing; beware that it comes with its own command line flags +// and does not use the ones from klog +// - [k8s.io/klog/v2/ktesting]: per-test output in Go unit tests +// - [k8s.io/klog/v2/klogr]: a deprecated, standalone [logr.Logger] on top of the main klog package; +// use [Background] instead if klog output routing is needed, [k8s.io/klog/v2/textlogger] if not +// - [k8s.io/klog/v2/examples]: demos of this functionality +// - [k8s.io/klog/v2/test]: reusable tests for [logr.Logger] implementations // // Basic examples: // @@ -39,35 +56,38 @@ // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=true -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. +// -logtostderr=true +// Logs are written to standard error instead of to files. +// This shortcuts most of the usual output routing: +// -alsologtostderr, -stderrthreshold and -log_dir have no +// effect and output redirection at runtime with SetOutput is +// ignored. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. // -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. 
For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". +// Other flags provide aids to debugging. // +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". package klog import ( @@ -81,7 +101,6 @@ import ( "math" "os" "path/filepath" - "reflect" "runtime" "strconv" "strings" @@ -89,82 +108,58 @@ import ( "sync/atomic" "time" - "github.com/go-logr/logr" + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/dbg" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" ) -// severity identifies the sort of log: info, warning etc. It also implements +// severityValue identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. The -stderrthreshold flag is of type severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", +type severityValue struct { + severity.Severity } // get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) +func (s *severityValue) get() severity.Severity { + return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity))) } // set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) +func (s *severityValue) set(val severity.Severity) { + atomic.StoreInt32((*int32)(&s.Severity), int32(val)) } // String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) +func (s *severityValue) String() string { + return strconv.FormatInt(int64(s.Severity), 10) } // Get is part of the flag.Getter interface. -func (s *severity) Get() interface{} { - return *s +func (s *severityValue) Get() interface{} { + return s.Severity } // Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity +func (s *severityValue) Set(value string) error { + var threshold severity.Severity // Is it a known name? 
- if v, ok := severityByName(value); ok { + if v, ok := severity.ByName(value); ok { threshold = v } else { v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } - threshold = severity(v) + threshold = severity.Severity(v) } logging.stderrThreshold.set(threshold) return nil } -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { lines int64 @@ -187,10 +182,10 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [severity.NumSeverity]*OutputStats{ + severity.InfoLog: &Stats.Info, + severity.WarningLog: &Stats.Warning, + severity.ErrorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is @@ -266,6 +261,10 @@ func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() + return m.serialize() +} + +func (m *moduleSpec) serialize() string { var b bytes.Buffer for i, f := range m.filter { if i > 0 { @@ -287,6 +286,17 @@ var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of // Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { + filter, err := parseModuleSpec(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +func parseModuleSpec(value string) ([]modulePat, error) { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { @@ -295,15 +305,15 @@ func (m *moduleSpec) Set(value string) error { } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax + return nil, errVmoduleSyntax } pattern := patLev[0] v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") + return nil, errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { - return errors.New("negative value for vmodule level") + return nil, errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. @@ -311,10 +321,7 @@ func (m *moduleSpec) Set(value string) error { // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil + return filter, nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters @@ -397,50 +404,48 @@ func (t *traceLocation) Set(value string) error { return nil } -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} +var logging loggingT +var commandLine flag.FlagSet -// init sets up the defaults and runs flushDaemon. +// init sets up the defaults and creates command line flags. func init() { - logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. 
+ commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)") + commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)") + commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, + "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") + commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)") logging.setVState(0, nil, false) - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - logging.oneOutput = false - go logging.flushDaemon() + commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity") + commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages") + commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") + commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") + logging.stderrThreshold = severityValue{ + Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. + } + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)") + commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + logging.settings.contextualLoggingEnabled = true + logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. +// It may get called repeatedly for different flagsets, but not +// twice for the same one. May get called concurrently +// to other goroutines using klog. However, only some flags +// may get set concurrently (see implementation). func InitFlags(flagset *flag.FlagSet) { if flagset == nil { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") - flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. 
"+ - "If the value is 0, the maximum file size is unlimited.") - flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + commandLine.VisitAll(func(f *flag.Flag) { + flagset.Var(f.Value, f.Name, f.Usage) + }) } // Flush flushes all pending log I/O. @@ -448,8 +453,20 @@ func Flush() { logging.lockAndFlushAll() } -// loggingT collects all the global state of the logging setup. -type loggingT struct { +// settings collects global settings. +type settings struct { + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled bool + + // logger is the global Logger chosen by users of klog, nil if + // none is available. + logger *logWriter + + // loggerOptions contains the options that were supplied for + // globalLogger. + loggerOptions loggerOptions + // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. @@ -457,25 +474,16 @@ type loggingT struct { alsoToStderr bool // The -alsologtostderr flag. // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. + stderrThreshold severityValue // The -stderrthreshold flag. - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex + // Access to all of the following fields must be protected via a mutex. - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level + file [severity.NumSeverity]io.Writer + // flushInterval is the interval for periodic flushing. If zero, + // the global default will be used. 
+ flushInterval time.Duration + // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. It may be read safely // using sync.LoadInt32, but is only modified under mu. @@ -508,9 +516,6 @@ type loggingT struct { // If true, add the file directory to the header addDirHeader bool - // If set, all output will be redirected unconditionally to the provided logr.Logger - logr *logr.Logger - // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -518,14 +523,40 @@ type loggingT struct { filter LogFilter } -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer +// deepCopy creates a copy that doesn't share anything with the original +// instance. +func (s settings) deepCopy() settings { + // vmodule is a slice and would be shared, so we have copy it. + filter := make([]modulePat, len(s.vmodule.filter)) + copy(filter, s.vmodule.filter) + s.vmodule.filter = filter + + if s.logger != nil { + logger := *s.logger + s.logger = &logger + } + + return s } -var logging loggingT +// loggingT collects all the global state of the logging setup. +type loggingT struct { + settings + + // flushD holds a flushDaemon that frequently flushes log file buffers. + // Uses its own mutex. + flushD *flushDaemon + + // mu protects the remaining elements of this structure and the fields + // in settingsT which need a mutex lock. + mu sync.Mutex + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level +} // setVState sets a consistent state for V logging. // l.mu is held. @@ -547,36 +578,56 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool l.verbosity.set(verbosity) } -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() +var timeNow = time.Now // Stubbed out for testing. + +// CaptureState gathers information about all current klog settings. +// The result can be used to restore those settings. +func CaptureState() State { + logging.mu.Lock() + defer logging.mu.Unlock() + return &state{ + settings: logging.settings.deepCopy(), + flushDRunning: logging.flushD.isRunning(), + maxSize: MaxSize, } - return b } -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() +// State stores a snapshot of klog settings. It gets created with CaptureState +// and can be used to restore the entire state. Modifying individual settings +// is supported via the command line flags. +type State interface { + // Restore restore the entire state. It may get called more than once. + Restore() } -var timeNow = time.Now // Stubbed out for testing. +type state struct { + settings + + flushDRunning bool + maxSize uint64 +} + +func (s *state) Restore() { + // This needs to be done before mutex locking. 
+ if s.flushDRunning && !logging.flushD.isRunning() { + // This is not quite accurate: StartFlushDaemon might + // have been called with some different interval. + interval := s.flushInterval + if interval == 0 { + interval = flushInterval + } + logging.flushD.run(interval) + } else if !s.flushDRunning && logging.flushD.isRunning() { + logging.flushD.stop() + } + + logging.mu.Lock() + defer logging.mu.Unlock() + + logging.settings = s.settings + logging.setVState(s.verbosity, s.vmodule.filter, true) + MaxSize = s.maxSize +} /* header formats a log header as defined by the C++ implementation. @@ -584,8 +635,11 @@ It returns a buffer containing the formatted header and the user's file and line The depth specifies how many stack frames above lives the source line to be identified in the log message. Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... + where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) mm The month (zero padded; ie May is '05') dd The day (zero padded) @@ -595,7 +649,7 @@ where the fields are defined as follows: line The line number msg The user-supplied message */ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { +func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) { _, file, line, ok := runtime.Caller(3 + depth) if !ok { file = "???" @@ -611,137 +665,90 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { } } } - return l.formatHeader(s, file, line), file, line + return l.formatHeader(s, file, line, timeNow()), file, line } // formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() +func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer { + buf := buffer.GetBuffer() if l.skipHeaders { return buf } - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) + buf.FormatHeader(s, file, line, now) return buf } -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. 
-func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } +func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { + l.printlnDepth(s, logger, filter, 1, args...) } -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } +func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprintln(args...) // cause vet to treat this function like fmt.Println } - return copy(buf.tmp[i:], buf.tmp[j:]) -} -func (l *loggingT) println(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { - buf, file, line := l.header(s, 0) - // if logger is set, we clear the generated header as we rely on the backing - // logger implementation to print headers - if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + buf, file, line := l.header(s, depth) + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprint(args...) // // cause vet to treat this function like fmt.Print + } + buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() + l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...) +} + +func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprint(buf, args...) 
- if buf.Bytes()[buf.Len()-1] != '\n' { + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() +func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) { + l.printfDepth(s, logger, filter, 1, format, args...) +} + +func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { + if false { + _ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf + } + + buf, file, line := l.header(s, depth) + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -750,19 +757,20 @@ func (l *loggingT) printf(s severity, logger *logr.Logger, filter LogFilter, for if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logger, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.putBuffer(buf) - buf = l.getBuffer() +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line, timeNow()) + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -774,8 +782,8 @@ func (l *loggingT) printWithFileLine(s severity, logger *logr.Logger, filter Log l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } -// if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +// if logger is specified, will call logger.Error, otherwise output with logging module. 
+func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -783,11 +791,11 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } - l.printS(err, errorLog, depth+1, msg, keysAndValues...) + l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } -// if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +// if logger is specified, will call logger.Info, otherwise output with logging module. +func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -795,101 +803,33 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } - l.printS(nil, infoLog, depth+1, msg, keysAndValues...) + l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } -// printS is called from infoS and errorS if loggr is not specified. +// printS is called from infoS and errorS if logger is not specified. // set log severity by s -func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { - b := &bytes.Buffer{} - b.WriteString(fmt.Sprintf("%q", msg)) +func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) if err != nil { - b.WriteByte(' ') - b.WriteString(fmt.Sprintf("err=%q", err.Error())) - } - kvListFormat(b, keysAndValues...) - l.printDepth(s, logging.logr, nil, depth+1, b) -} - -const missingValue = "(MISSING)" - -func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - for i := 0; i < len(keysAndValues); i += 2 { - var v interface{} - k := keysAndValues[i] - if i+1 < len(keysAndValues) { - v = keysAndValues[i+1] - } else { - v = missingValue - } - b.WriteByte(' ') - - switch v.(type) { - case string, error: - b.WriteString(fmt.Sprintf("%s=%q", k, v)) - case []byte: - b.WriteString(fmt.Sprintf("%s=%+q", k, v)) - default: - if _, ok := v.(fmt.Stringer); ok { - b.WriteString(fmt.Sprintf("%s=%q", k, v)) - } else { - b.WriteString(fmt.Sprintf("%s=%+v", k, v)) - } - } + serialize.KVListFormat(&b.Buffer, "err", err) } -} - -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - -// SetLogger will set the backing logr implementation for klog. -// If set, all log lines will be suppressed from the regular Output, and -// redirected to the logr implementation. -// Use as: -// ... 
-// klog.SetLogger(zapr.NewLogger(zapLog)) -// -// To remove a backing logr implemention, use ClearLogger. Setting an -// empty logger with SetLogger(logr.Logger{}) does not work. -func SetLogger(logr logr.Logger) { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = &logr -} - -// ClearLogger removes a backing logr implementation if one was set earlier -// with SetLogger. -func ClearLogger() { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = nil + serialize.KVListFormat(&b.Buffer, keysAndValues...) + l.printDepth(s, nil, nil, depth+1, &b.Buffer) + // Make the buffer available for reuse. + buffer.PutBuffer(b) } // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - for s := fatalLog; s >= infoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb + for s := severity.FatalLog; s >= severity.InfoLog; s-- { + logging.file[s] = w } } @@ -897,14 +837,11 @@ func SetOutput(w io.Writer) { func SetOutputBySeverity(name string, w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb + logging.file[sev] = w } // LogToStderr sets whether to log exclusively to stderr, bypassing outputs @@ -916,21 +853,36 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { + var isLocked = true l.mu.Lock() + defer func() { + if isLocked { + // Unlock before returning in case that it wasn't done already. + l.mu.Unlock() + } + }() + if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) + buf.Write(dbg.Stacks(false)) } } data := buf.Bytes() - if log != nil { - // TODO: set 'severity' and caller information as structured log info - // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == errorLog { - l.logr.WithCallDepth(depth+3).Error(nil, string(data)) + if logger != nil { + if logger.writeKlogBuffer != nil { + logger.writeKlogBuffer(data) } else { - log.WithCallDepth(depth + 3).Info(string(data)) + if len(data) > 0 && data[len(data)-1] == '\n' { + data = data[:len(data)-1] + } + // TODO: set 'severity' and caller information as structured log info + // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} + if s == severity.ErrorLog { + logger.WithCallDepth(depth+3).Error(nil, string(data)) + } else { + logger.WithCallDepth(depth + 3).Info(string(data)) + } } } else if l.toStderr { os.Stderr.Write(data) @@ -942,13 +894,13 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, if logging.logFile != "" { // Since we are using a single log file, all of the items in l.file array // will point to the same file, so just use one of them to write data. - if l.file[infoLog] == nil { - if err := l.createFiles(infoLog); err != nil { + if l.file[severity.InfoLog] == nil { + if err := l.createFiles(severity.InfoLog); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. 
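With the redirectBuffer wrapper removed, SetOutput and SetOutputBySeverity now store the supplied io.Writer directly. A hedged usage sketch for capturing klog output in a buffer; SetOutput, InitFlags, Info and Flush are the klog API shown or referenced in this diff, while the flag handling and buffer are illustrative:

package main

import (
    "bytes"
    "flag"
    "fmt"

    "k8s.io/klog/v2"
)

func main() {
    klog.InitFlags(nil)
    _ = flag.Set("logtostderr", "false") // route output to the configured writers, not stderr

    var buf bytes.Buffer
    klog.SetOutput(&buf) // the writer is now stored as-is for every severity

    klog.Info("hello from klog")
    klog.Flush()

    fmt.Printf("captured: %q\n", buf.String())
}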
l.exit(err) } } - l.file[infoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -958,92 +910,61 @@ func (l *loggingT) output(s severity, log *logr.Logger, buf *buffer, depth int, } if l.oneOutput { - l.file[s].Write(data) + _, _ = l.file[s].Write(data) } else { switch s { - case fatalLog: - l.file[fatalLog].Write(data) + case severity.FatalLog: + _, _ = l.file[severity.FatalLog].Write(data) fallthrough - case errorLog: - l.file[errorLog].Write(data) + case severity.ErrorLog: + _, _ = l.file[severity.ErrorLog].Write(data) fallthrough - case warningLog: - l.file[warningLog].Write(data) + case severity.WarningLog: + _, _ = l.file[severity.WarningLog].Write(data) fallthrough - case infoLog: - l.file[infoLog].Write(data) + case severity.InfoLog: + _, _ = l.file[severity.InfoLog].Write(data) } } } } - if s == fatalLog { + if s == severity.FatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(1) } // Dump all goroutine stacks before exiting. - trace := stacks(true) - // Write the stack trace for all goroutines to the stderr. - if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { - os.Stderr.Write(trace) + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(dbg.Stacks(false)) } + // Write the stack trace for all goroutines to the files. + trace := dbg.Stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { + for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) + _, _ = f.Write(trace) } } l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } - l.putBuffer(buf) - l.mu.Unlock() + buffer.PutBuffer(buf) + if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) } } -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when klog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. 
- n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that @@ -1060,8 +981,9 @@ func (l *loggingT) exit(err error) { logExitFunc(err) return } - l.flushAll() - os.Exit(2) + needToSync := l.flushAll() + l.syncAll(needToSync) + OsExit(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the @@ -1072,15 +994,11 @@ type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File - sev severity + sev severity.Severity nbytes uint64 // The number of bytes written to this file maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. func CalculateMaxSize() uint64 { if logging.logFile != "" { @@ -1118,7 +1036,7 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.file, _, err = create(severity.Name[sb.sev], now, startup) if err != nil { return err } @@ -1156,11 +1074,16 @@ const bufferSize = 256 * 1024 // createFiles creates all the log files for severity from sev down to infoLog. // l.mu is held. -func (l *loggingT) createFiles(sev severity) error { +func (l *loggingT) createFiles(sev severity.Severity) error { + interval := l.flushInterval + if interval == 0 { + interval = flushInterval + } + l.flushD.run(interval) now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { + for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, @@ -1177,30 +1100,135 @@ func (l *loggingT) createFiles(sev severity) error { const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() +type flushDaemon struct { + mu sync.Mutex + clock clock.Clock + flush func() + stopC chan struct{} + stopDone chan struct{} +} + +// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a +// clock.RealClock is used. +func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon { + if tickClock == nil { + tickClock = clock.RealClock{} + } + return &flushDaemon{ + flush: flush, + clock: tickClock, + } +} + +// run starts a goroutine that periodically calls the daemons flush function. +// Calling run on an already running daemon will have no effect. 
+func (f *flushDaemon) run(interval time.Duration) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC != nil { // daemon already running + return } + + f.stopC = make(chan struct{}, 1) + f.stopDone = make(chan struct{}, 1) + + ticker := f.clock.NewTicker(interval) + go func() { + defer ticker.Stop() + defer func() { f.stopDone <- struct{}{} }() + for { + select { + case <-ticker.C(): + f.flush() + case <-f.stopC: + f.flush() + return + } + } + }() +} + +// stop stops the running flushDaemon and waits until the daemon has shut down. +// Calling stop on a daemon that isn't running will have no effect. +func (f *flushDaemon) stop() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC == nil { // daemon not running + return + } + + f.stopC <- struct{}{} + <-f.stopDone + + f.stopC = nil + f.stopDone = nil +} + +// isRunning returns true if the flush daemon is running. +func (f *flushDaemon) isRunning() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.stopC != nil +} + +// StopFlushDaemon stops the flush daemon, if running, and flushes once. +// This prevents klog from leaking goroutines on shutdown. After stopping +// the daemon, you can still manually flush buffers again by calling Flush(). +func StopFlushDaemon() { + logging.flushD.stop() +} + +// StartFlushDaemon ensures that the flush daemon runs with the given delay +// between flush calls. If it is already running, it gets restarted. +func StartFlushDaemon(interval time.Duration) { + StopFlushDaemon() + logging.flushD.run(interval) } // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() - l.flushAll() + needToSync := l.flushAll() l.mu.Unlock() + // Some environments are slow when syncing and holding the lock might cause contention. + l.syncAll(needToSync) } -// flushAll flushes all the logs and attempts to "sync" their data to disk. +// flushAll flushes all the logs // l.mu is held. -func (l *loggingT) flushAll() { +// +// The result is the number of files which need to be synced and the pointers to them. +func (l *loggingT) flushAll() fileArray { + var needToSync fileArray + // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error + if sb, ok := file.(*syncBuffer); ok && sb.file != nil { + _ = sb.Flush() // ignore error + needToSync.files[needToSync.num] = sb.file + needToSync.num++ } } + if logging.loggerOptions.flush != nil { + logging.loggerOptions.flush() + } + return needToSync +} + +type fileArray struct { + num int + files [severity.NumSeverity]*os.File +} + +// syncAll attempts to "sync" their data to disk. +func (l *loggingT) syncAll(needToSync fileArray) { + // Flush from fatal down, in case there's trouble flushing. + for i := 0; i < needToSync.num; i++ { + _ = needToSync.files[i].Sync() // ignore error + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's @@ -1211,7 +1239,7 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. 
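StartFlushDaemon and StopFlushDaemon are new exported controls over the flush goroutine introduced above. A minimal sketch, assuming a long-running service that wants a shorter flush interval and a clean shutdown:

package main

import (
    "time"

    "k8s.io/klog/v2"
)

func main() {
    // Flush buffered log lines every second instead of the 5s default.
    klog.StartFlushDaemon(time.Second)

    klog.Info("service starting")
    // ... run the service ...

    // On shutdown: stop the daemon (it flushes once) and flush again for safety.
    klog.StopFlushDaemon()
    klog.Flush()
}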
func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) } @@ -1221,9 +1249,22 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge severity.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -1247,7 +1288,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text) return len(b), nil } @@ -1261,9 +1302,7 @@ func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } + file = strings.TrimSuffix(file, ".go") if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } @@ -1281,25 +1320,28 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr *logr.Logger - filter LogFilter + logger *logWriter } func newVerbose(level Level, b bool) Verbose { - if logging.logr == nil { - return Verbose{b, nil, logging.filter} + if logging.logger == nil { + return Verbose{b, nil} } - v := logging.logr.V(int(level)) - return Verbose{b, &v, logging.filter} + v := logging.logger.V(int(level)) + return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}} } // V reports whether verbosity at the call site is at least the requested level. // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if glog.V(2).Enabled() { klog.Info("log this") } +// +// if klog.V(2).Enabled() { klog.Info("log this") } +// // or +// // klog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1309,6 +1351,13 @@ func newVerbose(level Level, b bool) Verbose { // less than or equal to the value of the -vmodule pattern matching the source file // containing the call. func V(level Level) Verbose { + return VDepth(1, level) +} + +// VDepth is a variant of V that accepts a number of stack frames that will be +// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to +// V(). +func VDepth(depth int, level Level) Verbose { // This function tries hard to be cheap unless there's work to do. 
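Alongside the existing CopyStandardLogTo bridge, this update adds NewStandardLogger, which returns a *log.Logger whose output is routed into the named klog severity. A hedged usage sketch; the messages are illustrative:

package main

import (
    stdlog "log"

    "k8s.io/klog/v2"
)

func main() {
    // Everything written via the global std logger shows up as klog INFO lines.
    klog.CopyStandardLogTo("INFO")
    stdlog.Print("from the global std logger")

    // A dedicated *log.Logger whose writes become klog WARNING entries.
    warn := klog.NewStandardLogger("WARNING")
    warn.Print("from a dedicated std logger")

    klog.Flush()
}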
// The fast path is two atomic loads and compares. @@ -1325,7 +1374,7 @@ func V(level Level) Verbose { // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { + if runtime.Callers(2+depth, logging.pcs[:]) == 0 { return newVerbose(level, false) } // runtime.Callers returns "return PCs", but we want @@ -1353,7 +1402,15 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, v.filter, args...) + logging.print(severity.InfoLog, v.logger, logging.filter, args...) + } +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1361,7 +1418,15 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, v.filter, args...) + logging.println(severity.InfoLog, v.logger, logging.filter, args...) + } +} + +// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfolnDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1369,7 +1434,15 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, v.filter, format, args...) + logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...) + } +} + +// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { + if v.enabled { + logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...) } } @@ -1377,20 +1450,28 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...) } } // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...) +} + +// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...) + } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, args...) + logging.errorS(err, v.logger, logging.filter, 0, msg, args...) 
} } @@ -1398,32 +1479,44 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, logging.filter, args...) + logging.print(severity.InfoLog, logging.logger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, logging.filter, args...) + logging.println(severity.InfoLog, logging.logger, logging.filter, args...) +} + +// InfolnDepth acts as Infoln but uses depth to determine which call frame to log. +// InfolnDepth(0, "msg") is the same as Infoln("msg"). +func InfolnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...) +} + +// InfofDepth acts as Infof but uses depth to determine which call frame to log. +// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). +func InfofDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1435,55 +1528,79 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, logging.filter, args...) + logging.print(severity.WarningLog, logging.logger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. 
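VDepth and the new *Depth call variants exist so that helpers which log on behalf of their caller can skip their own stack frame, both for the -vmodule verbosity check and for the file:line recorded in the header. A sketch with an assumed helper named debugf:

package main

import (
    "flag"

    "k8s.io/klog/v2"
)

// debugf logs on behalf of its caller: VDepth(1, ...) skips this frame when
// matching -vmodule patterns and InfofDepth(1, ...) reports the caller's
// file:line instead of this helper's.
func debugf(format string, args ...interface{}) {
    if klog.VDepth(1, 4).Enabled() {
        klog.InfofDepth(1, format, args...)
    }
}

func main() {
    klog.InitFlags(nil)
    flag.Parse() // run with -v=4 to see the debugf output

    debugf("cache warmed with %d entries", 42)
    klog.Flush()
}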
func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, logging.filter, args...) + logging.println(severity.WarningLog, logging.logger, logging.filter, args...) +} + +// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. +// WarninglnDepth(0, "msg") is the same as Warningln("msg"). +func WarninglnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...) +} + +// WarningfDepth acts as Warningf but uses depth to determine which call frame to log. +// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). +func WarningfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, logging.filter, args...) + logging.print(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, logging.filter, args...) + logging.println(severity.ErrorLog, logging.logger, logging.filter, args...) +} + +// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. +// ErrorlnDepth(0, "msg") is the same as Errorln("msg"). +func ErrorlnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...) +} + +// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. +// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). +func ErrorfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1496,71 +1613,108 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) 
+ logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// prints stack trace(s), then calls OsExit(255). +// +// Stderr only receives a dump of the current goroutine's stack trace. Log files, +// if there are any, receive a dump of the stack traces in all goroutines. +// +// Callers who want more control over handling of fatal events may instead use a +// combination of different functions: +// - some info or error logging function, optionally with a stack trace +// value generated by github.com/go-logr/lib/dbg.Backtrace +// - Flush to flush pending log data +// - panic, os.Exit or returning to the caller with an error +// // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) +} + +// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. +// FatallnDepth(0, "msg") is the same as Fatalln("msg"). +func FatallnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) +} + +// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. +// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). +func FatalfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. 
// It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32 -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. +// ExitlnDepth(0, "msg") is the same as Exitln("msg"). +func ExitlnDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) +} + +// ExitfDepth acts as Exitf but uses depth to determine which call frame to log. +// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). +func ExitfDepth(depth int, format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, @@ -1571,79 +1725,10 @@ type LogFilter interface { FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) } +// SetLogFilter installs a filter that is used for all log calls. +// +// Modifying the filter is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. func SetLogFilter(filter LogFilter) { - logging.mu.Lock() - defer logging.mu.Unlock() - logging.filter = filter } - -// ObjectRef references a kubernetes object -type ObjectRef struct { - Name string `json:"name"` - Namespace string `json:"namespace,omitempty"` -} - -func (ref ObjectRef) String() string { - if ref.Namespace != "" { - return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) - } - return ref.Name -} - -// MarshalLog ensures that loggers with support for structured output will log -// as a struct by removing the String method via a custom type. 
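As the rewritten doc comments above spell out, Fatal* dumps goroutine stacks and exits with status 255, while Exit* sets fatalNoStacks and exits with status 1. A hedged sketch of choosing between them; the environment variable is illustrative:

package main

import (
    "os"

    "k8s.io/klog/v2"
)

func main() {
    cfg := os.Getenv("APP_CONFIG")
    if cfg == "" {
        // Expected operator error: exit without a goroutine stack dump.
        klog.Exitf("APP_CONFIG must be set")
    }
    // For "should never happen" conditions, Fatalf additionally dumps
    // goroutine stacks before exiting:
    // klog.Fatalf("invariant violated while loading %s", cfg)
    klog.Infof("loaded configuration from %s", cfg)
    klog.Flush()
}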
-func (ref ObjectRef) MarshalLog() interface{} { - type or ObjectRef - return or(ref) -} - -var _ logr.Marshaler = ObjectRef{} - -// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -// this interface may expand in the future, but will always be a subset of the -// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -type KMetadata interface { - GetName() string - GetNamespace() string -} - -// KObj returns ObjectRef from ObjectMeta -func KObj(obj KMetadata) ObjectRef { - if obj == nil { - return ObjectRef{} - } - if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { - return ObjectRef{} - } - - return ObjectRef{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - } -} - -// KRef returns ObjectRef from name and namespace -func KRef(namespace, name string) ObjectRef { - return ObjectRef{ - Name: name, - Namespace: namespace, - } -} - -// KObjs returns slice of ObjectRef from an slice of ObjectMeta -func KObjs(arg interface{}) []ObjectRef { - s := reflect.ValueOf(arg) - if s.Kind() != reflect.Slice { - return nil - } - objectRefs := make([]ObjectRef, 0, s.Len()) - for i := 0; i < s.Len(); i++ { - if v, ok := s.Index(i).Interface().(KMetadata); ok { - objectRefs = append(objectRefs, KObj(v)) - } else { - return nil - } - } - return objectRefs -} diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index de830d922..8bee16204 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -22,9 +22,7 @@ import ( "errors" "fmt" "os" - "os/user" "path/filepath" - "runtime" "strings" "sync" "time" @@ -57,38 +55,6 @@ func init() { } } -func getUserName() string { - userNameOnce.Do(func() { - // On Windows, the Go 'user' package requires netapi32.dll. - // This affects Windows Nano Server: - // https://github.com/golang/go/issues/21867 - // Fallback to using environment variables. - if runtime.GOOS == "windows" { - u := os.Getenv("USERNAME") - if len(u) == 0 { - return - } - // Sanitize the USERNAME since it may contain filepath separators. - u = strings.Replace(u, `\`, "_", -1) - - // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' - d := os.Getenv("USERDOMAIN") - if len(d) != 0 { - userName = d + "_" + u - } else { - userName = u - } - } else { - current, err := user.Current() - if err == nil { - userName = current.Username - } - } - }) - - return userName -} - // shortHostname returns its argument, truncating at the first period. // For instance, given "www.google.com" it returns "www". 
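This hunk removes ObjectRef, KObj and KRef from klog.go; in current klog releases these helpers are still exported (from a different source file), so existing callers should keep working. A usage sketch under that assumption, with an illustrative metav1.ObjectMeta value:

package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/klog/v2"
)

func main() {
    pod := &metav1.ObjectMeta{Name: "kubedns", Namespace: "kube-system"}

    // KObj extracts name/namespace from anything with GetName/GetNamespace.
    klog.InfoS("Pod status updated", "pod", klog.KObj(pod), "status", "ready")

    // KRef builds the same reference from plain strings.
    klog.InfoS("Pod deleted", "pod", klog.KRef("kube-system", "kubedns"))

    klog.Flush()
}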
func shortHostname(hostname string) string { @@ -143,8 +109,8 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string, f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err + _ = os.Remove(symlink) // ignore err + _ = os.Symlink(name, symlink) // ignore err return f, fname, nil } lastErr = err diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go new file mode 100644 index 000000000..aa4672685 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_others.go @@ -0,0 +1,19 @@ +//go:build !windows +// +build !windows + +package klog + +import ( + "os/user" +) + +func getUserName() string { + userNameOnce.Do(func() { + current, err := user.Current() + if err == nil { + userName = current.Username + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go new file mode 100644 index 000000000..2517f9c53 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package klog + +import ( + "os" + "strings" +) + +func getUserName() string { + userNameOnce.Do(func() { + // On Windows, the Go 'user' package requires netapi32.dll. + // This affects Windows Nano Server: + // https://github.com/golang/go/issues/21867 + // Fallback to using environment variables. + u := os.Getenv("USERNAME") + if len(u) == 0 { + return + } + // Sanitize the USERNAME since it may contain filepath separators. + u = strings.Replace(u, `\`, "_", -1) + + // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' + d := os.Getenv("USERDOMAIN") + if len(d) != 0 { + userName = d + "_" + u + } else { + userName = u + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go new file mode 100644 index 000000000..efec96fd4 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -0,0 +1,105 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" +) + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + +// NewKlogr returns a logger that is functionally identical to +// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The +// difference is that it uses a simpler implementation. +func NewKlogr() Logger { + return New(&klogger{}) +} + +// klogger is a subset of klogr/klogr.go. It had to be copied to break an +// import cycle (klogr wants to use klog, and klog wants to use klogr). +type klogger struct { + callDepth int + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. 
+ hasPrefix bool + + values []interface{} + groups string +} + +func (l *klogger) Init(info logr.RuntimeInfo) { + l.callDepth += info.CallDepth +} + +func (l *klogger) Info(level int, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) + // Skip this function. + VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) +} + +func (l *klogger) Enabled(level int) bool { + return VDepth(l.callDepth+1, Level(level)).Enabled() +} + +func (l *klogger) Error(err error, msg string, kvList ...interface{}) { + merged := serialize.MergeKVs(l.values, kvList) + ErrorSDepth(l.callDepth+1, err, msg, merged...) +} + +// WithName returns a new logr.Logger with the specified name appended. klogr +// uses '.' characters to separate name elements. Callers should not pass '.' +// in the provided name string, but this library does not actually enforce that. +func (l klogger) WithName(name string) logr.LogSink { + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + l.values = v + } else { + // Preprend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + l.values = v + l.hasPrefix = true + } + return &l +} + +func (l klogger) WithValues(kvList ...interface{}) logr.LogSink { + l.values = serialize.WithValues(l.values, kvList) + return &l +} + +func (l klogger) WithCallDepth(depth int) logr.LogSink { + l.callDepth += depth + return &l +} + +var _ logr.LogSink = &klogger{} +var _ logr.CallDepthLogSink = &klogger{} diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go new file mode 100644 index 000000000..c77d7baaf --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + "log/slog" + "strconv" + "time" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *klogger) Handle(ctx context.Context, record slog.Record) error { + if logging.logger != nil { + if slogSink, ok := logging.logger.GetSink().(logr.SlogSink); ok { + // Let that logger do the work. + return slogSink.Handle(ctx, record) + } + } + + return sloghandler.Handle(ctx, record, l.groups, slogOutput) +} + +// slogOutput corresponds to several different functions in klog.go. +// It goes through some of the same checks and formatting steps before +// it ultimately converges by calling logging.printWithInfos. 
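The new klogr.go file adds NewKlogr, a logr.Logger backed by klog itself, which is handy for handing a logr.Logger to libraries while keeping klog's output format and -v/-vmodule handling. A short usage sketch; the names and values are illustrative:

package main

import "k8s.io/klog/v2"

func main() {
    logger := klog.NewKlogr()
    logger = logger.WithName("controller").WithValues("reconcileID", "abc123")

    logger.Info("syncing", "key", "default/example")
    logger.V(4).Info("verbose detail, only emitted with -v=4 or higher")

    klog.Flush()
}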
+func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // See infoS. + if logging.logger != nil { + // Taking this path happens when klog has a logger installed + // as backend which doesn't support slog. Not good, we have to + // guess about the call depth and drop the actual location. + logger := logging.logger.WithCallDepth(2) + if s > severity.ErrorLog { + logger.Error(err, msg, kvList...) + } else { + logger.Info(msg, kvList...) + } + return + } + + // See printS. + b := buffer.GetBuffer() + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, kvList...) + + // See print + header. + buf := logging.formatHeader(s, file, line, now) + logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer) + + buffer.PutBuffer(b) +} + +func (l *klogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *klogger) WithGroup(name string) logr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ logr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/klog/v2/safeptr.go similarity index 52% rename from vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go rename to vendor/k8s.io/klog/v2/safeptr.go index 6741114dd..bbe24c2e8 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go +++ b/vendor/k8s.io/klog/v2/safeptr.go @@ -1,5 +1,8 @@ +//go:build go1.18 +// +build go1.18 + /* -Copyright 2018 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +17,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/client-go/pkg/apis/clientauthentication" -) +package klog -func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { - // This conversion intentionally omits the Response field, which were only - // supported in v1alpha1. - return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s) +// SafePtr is a function that takes a pointer of any type (T) as an argument. +// If the provided pointer is not nil, it returns the same pointer. If it is nil, it returns nil instead. +// +// This function is particularly useful to prevent nil pointer dereferencing when: +// +// - The type implements interfaces that are called by the logger, such as `fmt.Stringer`. +// - And these interface implementations do not perform nil checks themselves. +func SafePtr[T any](p *T) any { + if p == nil { + return nil + } + return p } diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go new file mode 100644 index 000000000..a66fe8a09 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -0,0 +1,290 @@ +/* +Copyright 2022 The Kubernetes Authors. 
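The new SafePtr helper in safeptr.go converts a nil *T into an untyped nil so that the logger never invokes interface methods (such as fmt.Stringer) on a nil receiver. A hedged sketch with an assumed endpoint type:

package main

import "k8s.io/klog/v2"

// endpoint is an illustrative type whose String method does not tolerate a
// nil receiver.
type endpoint struct{ host string }

func (e *endpoint) String() string { return e.host }

func main() {
    var ep *endpoint // nil, e.g. not resolved yet

    // Passing ep directly would let the logger call String() on a nil pointer;
    // SafePtr turns the nil *endpoint into a plain nil value instead.
    klog.InfoS("dialing", "endpoint", klog.SafePtr(ep))

    klog.Flush()
}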
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cached provides a cache mechanism based on etags to lazily +// build, and/or cache results from expensive operation such that those +// operations are not repeated unnecessarily. The operations can be +// created as a tree, and replaced dynamically as needed. +// +// All the operations in this module are thread-safe. +// +// # Dependencies and types of caches +// +// This package uses a source/transform/sink model of caches to build +// the dependency tree, and can be used as follows: +// - [Func]: A source cache that recomputes the content every time. +// - [Once]: A source cache that always produces the +// same content, it is only called once. +// - [Transform]: A cache that transforms data from one format to +// another. It's only refreshed when the source changes. +// - [Merge]: A cache that aggregates multiple caches in a map into one. +// It's only refreshed when the source changes. +// - [MergeList]: A cache that aggregates multiple caches in a list into one. +// It's only refreshed when the source changes. +// - [Atomic]: A cache adapter that atomically replaces the source with a new one. +// - [LastSuccess]: A cache adapter that caches the last successful and returns +// it if the next call fails. It extends [Atomic]. +// +// # Etags +// +// Etags in this library is a cache version identifier. It doesn't +// necessarily strictly match to the semantics of http `etags`, but are +// somewhat inspired from it and function with the same principles. +// Hashing the content is a good way to guarantee that your function is +// never going to be called spuriously. In Kubernetes world, this could +// be a `resourceVersion`, this can be an actual etag, a hash, a UUID +// (if the cache always changes), or even a made-up string when the +// content of the cache never changes. +package cached + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// Value is wrapping a value behind a getter for lazy evaluation. +type Value[T any] interface { + Get() (value T, etag string, err error) +} + +// Result is wrapping T and error into a struct for cases where a tuple is more +// convenient or necessary in Golang. +type Result[T any] struct { + Value T + Etag string + Err error +} + +func (r Result[T]) Get() (T, string, error) { + return r.Value, r.Etag, r.Err +} + +// Func wraps a (thread-safe) function as a Value[T]. +func Func[T any](fn func() (T, string, error)) Value[T] { + return valueFunc[T](fn) +} + +type valueFunc[T any] func() (T, string, error) + +func (c valueFunc[T]) Get() (T, string, error) { + return c() +} + +// Static returns constant values. +func Static[T any](value T, etag string) Value[T] { + return Result[T]{Value: value, Etag: etag} +} + +// Merge merges a of cached values. The merge function only gets called if any of +// the dependency has changed. +// +// If any of the dependency returned an error before, or any of the +// dependency returned an error this time, or if the mergeFn failed +// before, then the function is run again. 
+// +// Note that this assumes there is no "partial" merge, the merge +// function will remerge all the dependencies together everytime. Since +// the list of dependencies is constant, there is no way to save some +// partial merge information either. +// +// Also note that Golang map iteration is not stable. If the mergeFn +// depends on the order iteration to be stable, it will need to +// implement its own sorting or iteration order. +func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, string, error), caches map[K]Value[T]) Value[V] { + list := make([]Value[T], 0, len(caches)) + + // map from index to key + indexes := make(map[int]K, len(caches)) + i := 0 + for k := range caches { + list = append(list, caches[k]) + indexes[i] = k + i++ + } + + return MergeList(func(results []Result[T]) (V, string, error) { + if len(results) != len(indexes) { + panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes))) + } + m := make(map[K]Result[T], len(results)) + for i := range results { + m[indexes[i]] = results[i] + } + return mergeFn(m) + }, list) +} + +// MergeList merges a list of cached values. The function only gets called if +// any of the dependency has changed. +// +// The benefit of ListMerger over the basic Merger is that caches are +// stored in an ordered list so the order of the cache will be +// preserved in the order of the results passed to the mergeFn. +// +// If any of the dependency returned an error before, or any of the +// dependency returned an error this time, or if the mergeFn failed +// before, then the function is reran. +// +// Note that this assumes there is no "partial" merge, the merge +// function will remerge all the dependencies together everytime. Since +// the list of dependencies is constant, there is no way to save some +// partial merge information either. 
+func MergeList[T, V any](mergeFn func(results []Result[T]) (V, string, error), delegates []Value[T]) Value[V] { + return &listMerger[T, V]{ + mergeFn: mergeFn, + delegates: delegates, + } +} + +type listMerger[T, V any] struct { + lock sync.Mutex + mergeFn func([]Result[T]) (V, string, error) + delegates []Value[T] + cache []Result[T] + result Result[V] +} + +func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { + cacheResults := make([]Result[T], len(c.delegates)) + ch := make(chan struct { + int + Result[T] + }, len(c.delegates)) + for i := range c.delegates { + go func(index int) { + value, etag, err := c.delegates[index].Get() + ch <- struct { + int + Result[T] + }{index, Result[T]{Value: value, Etag: etag, Err: err}} + }(i) + } + for i := 0; i < len(c.delegates); i++ { + res := <-ch + cacheResults[res.int] = res.Result + } + return cacheResults +} + +func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { + if c.cache == nil { + return true + } + if c.result.Err != nil { + return true + } + if len(results) != len(c.cache) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cache))) + } + for i, oldResult := range c.cache { + newResult := results[i] + if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { + return true + } + } + return false +} + +func (c *listMerger[T, V]) Get() (V, string, error) { + c.lock.Lock() + defer c.lock.Unlock() + cacheResults := c.prepareResultsLocked() + if c.needsRunningLocked(cacheResults) { + c.cache = cacheResults + c.result.Value, c.result.Etag, c.result.Err = c.mergeFn(c.cache) + } + return c.result.Value, c.result.Etag, c.result.Err +} + +// Transform the result of another cached value. The transformFn will only be called +// if the source has updated, otherwise, the result will be returned. +// +// If the dependency returned an error before, or it returns an error +// this time, or if the transformerFn failed before, the function is +// reran. +func Transform[T, V any](transformerFn func(T, string, error) (V, string, error), source Value[T]) Value[V] { + return MergeList(func(delegates []Result[T]) (V, string, error) { + if len(delegates) != 1 { + panic(fmt.Errorf("invalid cache for transformer cache: %v", delegates)) + } + return transformerFn(delegates[0].Value, delegates[0].Etag, delegates[0].Err) + }, []Value[T]{source}) +} + +// Once calls Value[T].Get() lazily and only once, even in case of an error result. +func Once[T any](d Value[T]) Value[T] { + return &once[T]{ + data: d, + } +} + +type once[T any] struct { + once sync.Once + data Value[T] + result Result[T] +} + +func (c *once[T]) Get() (T, string, error) { + c.once.Do(func() { + c.result.Value, c.result.Etag, c.result.Err = c.data.Get() + }) + return c.result.Value, c.result.Etag, c.result.Err +} + +// Replaceable extends the Value[T] interface with the ability to change the +// underlying Value[T] after construction. +type Replaceable[T any] interface { + Value[T] + Store(Value[T]) +} + +// Atomic wraps a Value[T] as an atomic value that can be replaced. It implements +// Replaceable[T]. +type Atomic[T any] struct { + value atomic.Pointer[Value[T]] +} + +var _ Replaceable[[]byte] = &Atomic[[]byte]{} + +func (x *Atomic[T]) Store(val Value[T]) { x.value.Store(&val) } +func (x *Atomic[T]) Get() (T, string, error) { return (*x.value.Load()).Get() } + +// LastSuccess calls Value[T].Get(), but hides errors by returning the last +// success if there has been any. 
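The vendored cached package builds etag-keyed computation trees from a few primitives: Func recomputes on every Get, Transform re-runs only when its source's etag changes, and Once memoizes the first result. A hedged sketch chaining them; the data, etags and names are illustrative:

package main

import (
    "fmt"

    "k8s.io/kube-openapi/pkg/cached"
)

func main() {
    calls := 0

    // Source: recomputed on every Get, identified by an etag.
    source := cached.Func(func() ([]byte, string, error) {
        calls++
        data := []byte(`{"openapi":"3.0"}`)
        return data, fmt.Sprintf("etag-%d", len(data)), nil
    })

    // Transform: only re-runs its function when the source etag changes.
    size := cached.Transform(func(data []byte, etag string, err error) (int, string, error) {
        if err != nil {
            return 0, "", err
        }
        return len(data), etag, nil
    }, source)

    // Once: compute lazily, exactly once.
    frozen := cached.Once(size)

    for i := 0; i < 3; i++ {
        v, etag, err := frozen.Get()
        fmt.Println(v, etag, err)
    }
    fmt.Println("source calls:", calls) // 1, because Once short-circuits later Gets
}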
+type LastSuccess[T any] struct { + Atomic[T] + success atomic.Pointer[Result[T]] +} + +var _ Replaceable[[]byte] = &LastSuccess[[]byte]{} + +func (c *LastSuccess[T]) Get() (T, string, error) { + success := c.success.Load() + value, etag, err := c.Atomic.Get() + if err == nil { + if success == nil { + c.success.CompareAndSwap(nil, &Result[T]{Value: value, Etag: etag, Err: err}) + } + return value, etag, err + } + + if success != nil { + return success.Value, success.Etag, success.Err + } + + return value, etag, err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go new file mode 100644 index 000000000..e4ce843b0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -0,0 +1,289 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "net/http" + "strings" + + "github.com/emicklei/go-restful/v3" + + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +const ( + // TODO: Make this configurable. + ExtensionPrefix = "x-kubernetes-" + ExtensionV2Schema = ExtensionPrefix + "v2-schema" +) + +// OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. +type OpenAPIDefinition struct { + Schema spec.Schema + Dependencies []string +} + +type ReferenceCallback func(path string) spec.Ref + +// GetOpenAPIDefinitions is collection of all definitions. +type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition + +// OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface, +// the definition returned by it will be used, otherwise the auto-generated definitions will be used. See +// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when +// possible. +type OpenAPIDefinitionGetter interface { + OpenAPIDefinition() *OpenAPIDefinition +} + +type OpenAPIV3DefinitionGetter interface { + OpenAPIV3Definition() *OpenAPIDefinition +} + +type PathHandler interface { + Handle(path string, handler http.Handler) +} + +type PathHandlerByGroupVersion interface { + Handle(path string, handler http.Handler) + HandlePrefix(path string, handler http.Handler) +} + +// Config is set of configuration for openAPI spec generation. +type Config struct { + // List of supported protocols such as https, http, etc. + ProtocolList []string + + // Info is general information about the API. + Info *spec.Info + + // DefaultResponse will be used if an operation does not have any responses listed. It + // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. + DefaultResponse *spec.Response + + // ResponseDefinitions will be added to "responses" under the top-level swagger object. This is an object + // that holds responses definitions that can be used across operations. This property does not define + // global responses for all operations. 
For more info please refer: + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields + ResponseDefinitions map[string]spec.Response + + // CommonResponses will be added as a response to all operation specs. This is a good place to add common + // responses such as authorization failed. + CommonResponses map[int]spec.Response + + // List of webservice's path prefixes to ignore + IgnorePrefixes []string + + // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map + // or any of the models will result in spec generation failure. + GetDefinitions GetOpenAPIDefinitions + + // Provides the definition for all models used by routes. One of GetDefinitions or Definitions must be defined to generate a spec. + // This takes precedent over the GetDefinitions function + Definitions map[string]OpenAPIDefinition + + // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. + // + // Deprecated: GetOperationIDAndTagsFromRoute should be used instead. This cannot be specified if using the new Route + // interface set of funcs. + GetOperationIDAndTags func(r *restful.Route) (string, []string, error) + + // GetOperationIDAndTagsFromRoute returns operation id and tags for a Route. It is an optional function to customize operation IDs. + GetOperationIDAndTagsFromRoute func(r Route) (string, []string, error) + + // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. + // It is an optional function to customize model names. + GetDefinitionName func(name string) (string, spec.Extensions) + + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) + + // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config + // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. + SecurityDefinitions *spec.SecurityDefinitions + + // DefaultSecurity for all operations. This will pass as spec.SwaggerProps.Security to OpenAPI. + // For most cases, this will be list of acceptable definitions in SecurityDefinitions. + DefaultSecurity []map[string][]string +} + +// OpenAPIV3Config is set of configuration for OpenAPI V3 spec generation. +type OpenAPIV3Config struct { + // Info is general information about the API. + Info *spec.Info + + // DefaultResponse will be used if an operation does not have any responses listed. It + // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. + DefaultResponse *spec3.Response + + // ResponseDefinitions will be added to responses component. This is an object + // that holds responses that can be used across operations. + ResponseDefinitions map[string]*spec3.Response + + // CommonResponses will be added as a response to all operation specs. This is a good place to add common + // responses such as authorization failed. + CommonResponses map[int]*spec3.Response + + // List of webservice's path prefixes to ignore + IgnorePrefixes []string + + // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map + // or any of the models will result in spec generation failure. + // One of GetDefinitions or Definitions must be defined to generate a spec. 
+ GetDefinitions GetOpenAPIDefinitions + + // Provides the definition for all models used by routes. One of GetDefinitions or Definitions must be defined to generate a spec. + // This takes precedent over the GetDefinitions function + Definitions map[string]OpenAPIDefinition + + // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. + // + // Deprecated: GetOperationIDAndTagsFromRoute should be used instead. This cannot be specified if using the new Route + // interface set of funcs. + GetOperationIDAndTags func(r *restful.Route) (string, []string, error) + + // GetOperationIDAndTagsFromRoute returns operation id and tags for a Route. It is an optional function to customize operation IDs. + GetOperationIDAndTagsFromRoute func(r Route) (string, []string, error) + + // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. + // It is an optional function to customize model names. + GetDefinitionName func(name string) (string, spec.Extensions) + + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error) + + // SecuritySchemes is list of all security schemes for OpenAPI service. + SecuritySchemes spec3.SecuritySchemes + + // DefaultSecurity for all operations. + DefaultSecurity []map[string][]string +} + +type typeInfo struct { + name string + format string + zero interface{} +} + +var schemaTypeFormatMap = map[string]typeInfo{ + "uint": {"integer", "int32", 0.}, + "uint8": {"integer", "byte", 0.}, + "uint16": {"integer", "int32", 0.}, + "uint32": {"integer", "int64", 0.}, + "uint64": {"integer", "int64", 0.}, + "int": {"integer", "int32", 0.}, + "int8": {"integer", "byte", 0.}, + "int16": {"integer", "int32", 0.}, + "int32": {"integer", "int32", 0.}, + "int64": {"integer", "int64", 0.}, + "byte": {"integer", "byte", 0}, + "float64": {"number", "double", 0.}, + "float32": {"number", "float", 0.}, + "bool": {"boolean", "", false}, + "time.Time": {"string", "date-time", ""}, + "string": {"string", "", ""}, + "integer": {"integer", "", 0.}, + "number": {"number", "", 0.}, + "boolean": {"boolean", "", false}, + "[]byte": {"string", "byte", ""}, // base64 encoded characters + "interface{}": {"object", "", interface{}(nil)}, +} + +// This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are +// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type +// comment (the comment that is added before type definition) will be lost. The spec will still have the property +// comment. The second way is to implement OpenAPIDefinitionGetter interface. That function can customize the spec (so +// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple +// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. +// Example: +// +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// +// // IntOrString documentation... +// type IntOrString { ... 
} +// +// Adding IntOrString to this function: +// +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } +// +// Implement OpenAPIDefinitionGetter for IntOrString: +// +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// +// ... +// definitions: +// +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." // new +// } +// } +func OpenAPITypeFormat(typeName string) (string, string) { + mapped, ok := schemaTypeFormatMap[typeName] + if !ok { + return "", "" + } + return mapped.name, mapped.format +} + +// Returns the zero-value for the given type along with true if the type +// could be found. +func OpenAPIZeroValue(typeName string) (interface{}, bool) { + mapped, ok := schemaTypeFormatMap[typeName] + if !ok { + return nil, false + } + return mapped.zero, true +} + +func EscapeJsonPointer(p string) string { + // Escaping reference name using rfc6901 + p = strings.Replace(p, "~", "~0", -1) + p = strings.Replace(p, "/", "~1", -1) + return p +} + +func EmbedOpenAPIDefinitionIntoV2Extension(main OpenAPIDefinition, embedded OpenAPIDefinition) OpenAPIDefinition { + if main.Schema.Extensions == nil { + main.Schema.Extensions = make(map[string]interface{}) + } + main.Schema.Extensions[ExtensionV2Schema] = embedded.Schema + return main +} + +// GenerateOpenAPIV3OneOfSchema generate the set of schemas that MUST be assigned to SchemaProps.OneOf +func GenerateOpenAPIV3OneOfSchema(types []string) (oneOf []spec.Schema) { + for _, t := range types { + oneOf = append(oneOf, spec.Schema{SchemaProps: spec.SchemaProps{Type: []string{t}}}) + } + return +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/doc.go b/vendor/k8s.io/kube-openapi/pkg/common/doc.go new file mode 100644 index 000000000..2ba6d247b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/common/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package common holds shared code and types between open API code +// generator and spec generator. +package common diff --git a/vendor/k8s.io/kube-openapi/pkg/common/interfaces.go b/vendor/k8s.io/kube-openapi/pkg/common/interfaces.go new file mode 100644 index 000000000..059fc551b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/common/interfaces.go @@ -0,0 +1,88 @@ +package common + +// RouteContainer is the entrypoint for a service, which may contain multiple +// routes under a common path with a common set of path parameters. +type RouteContainer interface { + // RootPath is the path that all contained routes are nested under. + RootPath() string + // PathParameters are common parameters defined in the root path. + PathParameters() []Parameter + // Routes are all routes exposed under the root path. + Routes() []Route +} + +// Route is a logical endpoint of a service. +type Route interface { + // Method defines the HTTP Method. + Method() string + // Path defines the route's endpoint. 
+ Path() string + // OperationName defines a machine-readable ID for the route. + OperationName() string + // Parameters defines the list of accepted parameters. + Parameters() []Parameter + // Description is a human-readable route description. + Description() string + // Consumes defines the consumed content-types. + Consumes() []string + // Produces defines the produced content-types. + Produces() []string + // Metadata allows adding extensions to the generated spec. + Metadata() map[string]interface{} + // RequestPayloadSample defines an example request payload. Can return nil. + RequestPayloadSample() interface{} + // ResponsePayloadSample defines an example response payload. Can return nil. + ResponsePayloadSample() interface{} + // StatusCodeResponses defines a mapping of HTTP Status Codes to the specific response(s). + // Multiple responses with the same HTTP Status Code are acceptable. + StatusCodeResponses() []StatusCodeResponse +} + +// StatusCodeResponse is an explicit response type with an HTTP Status Code. +type StatusCodeResponse interface { + // Code defines the HTTP Status Code. + Code() int + // Message returns the human-readable message. + Message() string + // Model defines an example payload for this response. + Model() interface{} +} + +// Parameter is a Route parameter. +type Parameter interface { + // Name defines the unique-per-route identifier. + Name() string + // Description is the human-readable description of the param. + Description() string + // Required defines if this parameter must be provided. + Required() bool + // Kind defines the type of the parameter itself. + Kind() ParameterKind + // DataType defines the type of data the parameter carries. + DataType() string + // AllowMultiple defines if more than one value can be supplied for the parameter. + AllowMultiple() bool +} + +// ParameterKind is an enum of route parameter types. +type ParameterKind int + +const ( + // PathParameterKind indicates the request parameter type is "path". + PathParameterKind = ParameterKind(iota) + + // QueryParameterKind indicates the request parameter type is "query". + QueryParameterKind + + // BodyParameterKind indicates the request parameter type is "body". + BodyParameterKind + + // HeaderParameterKind indicates the request parameter type is "header". + HeaderParameterKind + + // FormParameterKind indicates the request parameter type is "form". + FormParameterKind + + // UnknownParameterKind indicates the request parameter type has not been specified. + UnknownParameterKind +) diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go new file mode 100644 index 000000000..fc4563488 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -0,0 +1,295 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handler3 + +import ( + "bytes" + "crypto/sha512" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + openapi_v3 "github.com/google/gnostic-models/openapiv3" + "github.com/google/uuid" + "github.com/munnerz/goautoneg" + + "k8s.io/klog/v2" + "k8s.io/kube-openapi/pkg/cached" + "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/spec3" +) + +const ( + subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf" + subTypeProtobuf = "com.github.proto-openapi.spec.v3.v1.0+protobuf" + subTypeJSON = "json" +) + +// OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3 +// It maps Discovery paths to their corresponding URLs with a hash parameter included +type OpenAPIV3Discovery struct { + Paths map[string]OpenAPIV3DiscoveryGroupVersion `json:"paths"` +} + +// OpenAPIV3DiscoveryGroupVersion includes information about a group version and URL +// for accessing the OpenAPI. The URL includes a hash parameter to support client side caching +type OpenAPIV3DiscoveryGroupVersion struct { + // Path is an absolute path of an OpenAPI V3 document in the form of /openapi/v3/apis/apps/v1?hash=014fbff9a07c + ServerRelativeURL string `json:"serverRelativeURL"` +} + +func ToV3ProtoBinary(json []byte) ([]byte, error) { + document, err := openapi_v3.ParseDocument(json) + if err != nil { + return nil, err + } + return proto.Marshal(document) +} + +type timedSpec struct { + spec []byte + lastModified time.Time +} + +// This type is protected by the lock on OpenAPIService. +type openAPIV3Group struct { + specCache cached.LastSuccess[*spec3.OpenAPI] + pbCache cached.Value[timedSpec] + jsonCache cached.Value[timedSpec] +} + +func newOpenAPIV3Group() *openAPIV3Group { + o := &openAPIV3Group{} + o.jsonCache = cached.Transform[*spec3.OpenAPI](func(spec *spec3.OpenAPI, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err + } + json, err := json.Marshal(spec) + if err != nil { + return timedSpec{}, "", err + } + return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil + }, &o.specCache) + o.pbCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err + } + proto, err := ToV3ProtoBinary(ts.spec) + if err != nil { + return timedSpec{}, "", err + } + return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil + }, o.jsonCache) + return o +} + +func (o *openAPIV3Group) UpdateSpec(openapi cached.Value[*spec3.OpenAPI]) { + o.specCache.Store(openapi) +} + +// OpenAPIService is the service responsible for serving OpenAPI spec. It has +// the ability to safely change the spec while serving it. +type OpenAPIService struct { + // Mutex protects the schema map. + mutex sync.Mutex + v3Schema map[string]*openAPIV3Group + + discoveryCache cached.LastSuccess[timedSpec] +} + +func computeETag(data []byte) string { + if data == nil { + return "" + } + return fmt.Sprintf("%X", sha512.Sum512(data)) +} + +func constructServerRelativeURL(gvString, etag string) string { + u := url.URL{Path: path.Join("/openapi/v3", gvString)} + query := url.Values{} + query.Set("hash", etag) + u.RawQuery = query.Encode() + return u.String() +} + +// NewOpenAPIService builds an OpenAPIService starting with the given spec. 
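Editor's aside, not part of the vendored file: a hypothetical wiring sketch for the OpenAPIService defined below, showing one way to expose the discovery document and per-group specs over net/http. The mux adapter, group name, and listen address are assumptions; callers supply their own PathHandlerByGroupVersion implementation.

package main

import (
	"net/http"

	"k8s.io/kube-openapi/pkg/handler3"
	"k8s.io/kube-openapi/pkg/spec3"
)

// muxAdapter adapts *http.ServeMux to the PathHandlerByGroupVersion interface
// expected by RegisterOpenAPIV3VersionedService.
type muxAdapter struct{ mux *http.ServeMux }

func (m muxAdapter) Handle(path string, h http.Handler) { m.mux.Handle(path, h) }

// ServeMux treats a pattern ending in "/" as a prefix match, which is the
// behavior HandlePrefix needs for the per-group-version endpoints.
func (m muxAdapter) HandlePrefix(path string, h http.Handler) { m.mux.Handle(path, h) }

func main() {
	svc := handler3.NewOpenAPIService()
	// Publish a placeholder spec for one group-version; the discovery document
	// served at /openapi/v3 will list it with a ?hash= query parameter.
	svc.UpdateGroupVersion("apis/apps/v1", &spec3.OpenAPI{})

	mux := http.NewServeMux()
	_ = svc.RegisterOpenAPIV3VersionedService("/openapi/v3", muxAdapter{mux: mux})
	_ = http.ListenAndServe(":8080", mux)
}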
+func NewOpenAPIService() *OpenAPIService { + o := &OpenAPIService{} + o.v3Schema = make(map[string]*openAPIV3Group) + // We're not locked because we haven't shared the structure yet. + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) + return o +} + +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] { + caches := make(map[string]cached.Value[timedSpec], len(o.v3Schema)) + for gvName, group := range o.v3Schema { + caches[gvName] = group.jsonCache + } + return cached.Merge(func(results map[string]cached.Result[timedSpec]) (timedSpec, string, error) { + discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} + for gvName, result := range results { + if result.Err != nil { + return timedSpec{}, "", result.Err + } + discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ + ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), + } + } + j, err := json.Marshal(discovery) + if err != nil { + return timedSpec{}, "", err + } + return timedSpec{spec: j, lastModified: time.Now()}, computeETag(j), nil + }, caches) +} + +func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]byte, string, time.Time, error) { + o.mutex.Lock() + defer o.mutex.Unlock() + v, ok := o.v3Schema[group] + if !ok { + return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) + } + switch getType { + case subTypeJSON: + ts, etag, err := v.jsonCache.Get() + return ts.spec, etag, ts.lastModified, err + case subTypeProtobuf, subTypeProtobufDeprecated: + ts, etag, err := v.pbCache.Get() + return ts.spec, etag, ts.lastModified, err + default: + return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) + } +} + +// UpdateGroupVersionLazy adds or updates an existing group with the new cached. +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Value[*spec3.OpenAPI]) { + o.mutex.Lock() + defer o.mutex.Unlock() + if _, ok := o.v3Schema[group]; !ok { + o.v3Schema[group] = newOpenAPIV3Group() + // Since there is a new item, we need to re-build the cache map. + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) + } + o.v3Schema[group].UpdateSpec(openapi) +} + +func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { + o.UpdateGroupVersionLazy(group, cached.Static(openapi, uuid.New().String())) +} + +func (o *OpenAPIService) DeleteGroupVersion(group string) { + o.mutex.Lock() + defer o.mutex.Unlock() + delete(o.v3Schema, group) + // Rebuild the merge cache map since the items have changed. 
+ o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) +} + +func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { + ts, etag, err := o.discoveryCache.Get() + if err != nil { + klog.Errorf("Error serving discovery: %s", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("Etag", strconv.Quote(etag)) + w.Header().Set("Content-Type", "application/json") + http.ServeContent(w, r, "/openapi/v3", ts.lastModified, bytes.NewReader(ts.spec)) +} + +func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { + url := strings.SplitAfterN(r.URL.Path, "/", 4) + group := url[3] + + decipherableFormats := r.Header.Get("Accept") + if decipherableFormats == "" { + decipherableFormats = "*/*" + } + clauses := goautoneg.ParseAccept(decipherableFormats) + w.Header().Add("Vary", "Accept") + + if len(clauses) == 0 { + return + } + + accepted := []struct { + Type string + SubType string + ReturnedContentType string + }{ + {"application", subTypeJSON, "application/" + subTypeJSON}, + {"application", subTypeProtobuf, "application/" + subTypeProtobuf}, + {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf}, + } + + for _, clause := range clauses { + for _, accepts := range accepted { + if clause.Type != accepts.Type && clause.Type != "*" { + continue + } + if clause.SubType != accepts.SubType && clause.SubType != "*" { + continue + } + data, etag, lastModified, err := o.getSingleGroupBytes(accepts.SubType, group) + if err != nil { + return + } + // Set Content-Type header in the reponse + w.Header().Set("Content-Type", accepts.ReturnedContentType) + + // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag + w.Header().Set("Etag", strconv.Quote(etag)) + + if hash := r.URL.Query().Get("hash"); hash != "" { + if hash != etag { + u := constructServerRelativeURL(group, etag) + http.Redirect(w, r, u, 301) + return + } + // The Vary header is required because the Accept header can + // change the contents returned. This prevents clients from caching + // protobuf as JSON and vice versa. + w.Header().Set("Vary", "Accept") + + // Only set these headers when a hash is given. + w.Header().Set("Cache-Control", "public, immutable") + // Set the Expires directive to the maximum value of one year from the request, + // effectively indicating that the cache never expires. + w.Header().Set("Expires", time.Now().AddDate(1, 0, 0).Format(time.RFC1123)) + } + http.ServeContent(w, r, "", lastModified, bytes.NewReader(data)) + return + } + } + w.WriteHeader(406) + return +} + +func (o *OpenAPIService) RegisterOpenAPIV3VersionedService(servePath string, handler common.PathHandlerByGroupVersion) error { + handler.Handle(servePath, http.HandlerFunc(o.HandleDiscovery)) + handler.HandlePrefix(servePath+"/", http.HandlerFunc(o.HandleGroupVersion)) + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go new file mode 100644 index 000000000..da5485f6a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -0,0 +1,25 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +// Used by tests to selectively disable experimental JSON unmarshaler +var UseOptimizedJSONUnmarshaling bool = true +var UseOptimizedJSONUnmarshalingV3 bool = true + +// Used by tests to selectively disable experimental JSON marshaler +var UseOptimizedJSONMarshaling bool = true +var UseOptimizedJSONMarshalingV3 bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go new file mode 100644 index 000000000..7393bacf7 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "github.com/go-openapi/jsonreference" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// DeterministicMarshal calls the jsonv2 library with the deterministic +// flag in order to have stable marshaling. +func DeterministicMarshal(in any) ([]byte, error) { + return jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in) +} + +// JSONRefFromMap populates a json reference object if the map v contains a $ref key. +func JSONRefFromMap(jsonRef *jsonreference.Ref, v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *jsonRef = ref + } + } + return nil +} + +// SanitizeExtensions sanitizes the input map such that non extension +// keys (non x-*, X-*) keys are dropped from the map. Returns the new +// modified map, or nil if the map is now empty. +func SanitizeExtensions(e map[string]interface{}) map[string]interface{} { + for k := range e { + if !IsExtensionKey(k) { + delete(e, k) + } + } + if len(e) == 0 { + e = nil + } + return e +} + +// IsExtensionKey returns true if the input string is of format x-* or X-* +func IsExtensionKey(k string) bool { + return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' +} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS similarity index 74% rename from vendor/golang.org/x/oauth2/AUTHORS rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS index 15167cd74..2b00ddba0 100644 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS @@ -1,3 +1,3 @@ # This source code refers to The Go Authors for copyright purposes. 
# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS similarity index 70% rename from vendor/golang.org/x/oauth2/CONTRIBUTORS rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS index 1c4577e96..1fbd3e976 100644 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS @@ -1,3 +1,3 @@ # This source code was written by the Go contributors. # The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE new file mode 100644 index 000000000..244127300 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2020 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md new file mode 100644 index 000000000..0349adf69 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md @@ -0,0 +1,321 @@ +# JSON Serialization (v2) + +[![GoDev](https://img.shields.io/static/v1?label=godev&message=reference&color=00add8)](https://pkg.go.dev/github.com/go-json-experiment/json) +[![Build Status](https://github.com/go-json-experiment/json/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/go-json-experiment/json/actions) + +This module hosts an experimental implementation of v2 `encoding/json`. 
+The API is unstable and breaking changes will regularly be made. +Do not depend on this in publicly available modules. + +## Goals and objectives + +* **Mostly backwards compatible:** If possible, v2 should aim to be _mostly_ +compatible with v1 in terms of both API and default behavior to ease migration. +For example, the `Marshal` and `Unmarshal` functions are the most widely used +declarations in the v1 package. It seems sensible for equivalent functionality +in v2 to be named the same and have the same signature. +Behaviorally, we should aim for 95% to 99% backwards compatibility. +We do not aim for 100% compatibility since we want the freedom to break +certain behaviors that are now considered to have been a mistake. +We may provide options that can bring the v2 implementation to 100% compatibility, +but it will not be the default. + +* **More flexible:** There is a +[long list of feature requests](https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+encoding%2Fjson+in%3Atitle). +We should aim to provide the most flexible features that addresses most usages. +We do not want to over fit the v2 API to handle every possible use case. +Ideally, the features provided should be orthogonal in nature such that +any combination of features results in as few surprising edge cases as possible. + +* **More performant:** JSON serialization is widely used and any bit of extra +performance gains will be greatly appreciated. Some rarely used behaviors of v1 +may be dropped in favor of better performance. For example, +despite `Encoder` and `Decoder` operating on an `io.Writer` and `io.Reader`, +they do not operate in a truly streaming manner, +leading to a loss in performance. The v2 implementation should aim to be truly +streaming by default (see [#33714](https://golang.org/issue/33714)). + +* **Easy to use (hard to misuse):** The v2 API should aim to make +the common case easy and the less common case at least possible. +The API should avoid behavior that goes contrary to user expectation, +which may result in subtle bugs (see [#36225](https://golang.org/issue/36225)). + +* **v1 and v2 maintainability:** Since the v1 implementation must stay forever, +it would be beneficial if v1 could be implemented under the hood with v2, +allowing for less maintenance burden in the future. This probably implies that +behavioral changes in v2 relative to v1 need to be exposed as options. + +* **Avoid unsafe:** Standard library packages generally avoid the use of +package `unsafe` even if it could provide a performance boost. +We aim to preserve this property. + +## Expectations + +While this module aims to possibly be the v2 implementation of `encoding/json`, +there is no guarantee that this outcome will occur. As with any major change +to the Go standard library, this will eventually go through the +[Go proposal process](https://github.com/golang/proposal#readme). +At the present moment, this is still in the design and experimentation phase +and is not ready for a formal proposal. + +There are several possible outcomes from this experiment: +1. We determine that a v2 `encoding/json` would not provide sufficient benefit +over the existing v1 `encoding/json` package. Thus, we abandon this effort. +2. We propose a v2 `encoding/json` design, but it is rejected in favor of some +other design that is considered superior. +3. 
We propose a v2 `encoding/json` design, but rather than adding an entirely +new v2 `encoding/json` package, we decide to merge its functionality into +the existing v1 `encoding/json` package. +4. We propose a v2 `encoding/json` design and it is accepted, resulting in +its addition to the standard library. +5. Some other unforeseen outcome (among the infinite number of possibilities). + +## Development + +This module is primarily developed by +[@dsnet](https://github.com/dsnet), +[@mvdan](https://github.com/mvdan), and +[@johanbrandhorst](https://github.com/johanbrandhorst) +with feedback provided by +[@rogpeppe](https://github.com/rogpeppe), +[@ChrisHines](https://github.com/ChrisHines), and +[@rsc](https://github.com/rsc). + +Discussion about semantics occur semi-regularly, where a +[record of past meetings can be found here](https://docs.google.com/document/d/1rovrOTd-wTawGMPPlPuKhwXaYBg9VszTXR9AQQL5LfI/edit?usp=sharing). + +## Design overview + +This package aims to provide a clean separation between syntax and semantics. +Syntax deals with the structural representation of JSON (as specified in +[RFC 4627](https://tools.ietf.org/html/rfc4627), +[RFC 7159](https://tools.ietf.org/html/rfc7159), +[RFC 7493](https://tools.ietf.org/html/rfc7493), +[RFC 8259](https://tools.ietf.org/html/rfc8259), and +[RFC 8785](https://tools.ietf.org/html/rfc8785)). +Semantics deals with the meaning of syntactic data as usable application data. + +The `Encoder` and `Decoder` types are streaming tokenizers concerned with the +packing or parsing of JSON data. They operate on `Token` and `RawValue` types +which represent the common data structures that are representable in JSON. +`Encoder` and `Decoder` do not aim to provide any interpretation of the data. + +Functions like `Marshal`, `MarshalFull`, `MarshalNext`, `Unmarshal`, +`UnmarshalFull`, and `UnmarshalNext` provide semantic meaning by correlating +any arbitrary Go type with some JSON representation of that type (as stored in +data types like `[]byte`, `io.Writer`, `io.Reader`, `Encoder`, or `Decoder`). + +![API overview](api.png) + +This diagram provides a high-level overview of the v2 `json` package. +Purple blocks represent types, while blue blocks represent functions or methods. +The arrows and their direction represent the approximate flow of data. +The bottom half of the diagram contains functionality that is only concerned +with syntax, while the upper half contains functionality that assigns +semantic meaning to syntactic data handled by the bottom half. + +In contrast to v1 `encoding/json`, options are represented as separate types +rather than being setter methods on the `Encoder` or `Decoder` types. + +## Behavior changes + +The v2 `json` package changes the default behavior of `Marshal` and `Unmarshal` +relative to the v1 `json` package to be more sensible. +Some of these behavior changes have options and workarounds to opt into +behavior similar to what v1 provided. + +This table shows an overview of the changes: + +| v1 | v2 | Details | +| -- | -- | ------- | +| JSON object members are unmarshaled into a Go struct using a **case-insensitive name match**. | JSON object members are unmarshaled into a Go struct using a **case-sensitive name match**. 
| [CaseSensitivity](/diff_test.go#:~:text=TestCaseSensitivity) | +| When marshaling a Go struct, a struct field marked as `omitempty` is omitted if **the field value is an empty Go value**, which is defined as false, 0, a nil pointer, a nil interface value, and any empty array, slice, map, or string. | When marshaling a Go struct, a struct field marked as `omitempty` is omitted if **the field value would encode as an empty JSON value**, which is defined as a JSON null, or an empty JSON string, object, or array. | [OmitEmptyOption](/diff_test.go#:~:text=TestOmitEmptyOption) | +| The `string` option **does affect** Go bools. | The `string` option **does not affect** Go bools. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| The `string` option **does not recursively affect** sub-values of the Go field value. | The `string` option **does recursively affect** sub-values of the Go field value. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| The `string` option **sometimes accepts** a JSON null escaped within a JSON string. | The `string` option **never accepts** a JSON null escaped within a JSON string. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| A nil Go slice is marshaled as a **JSON null**. | A nil Go slice is marshaled as an **empty JSON array**. | [NilSlicesAndMaps](/diff_test.go#:~:text=TestNilSlicesAndMaps) | +| A nil Go map is marshaled as a **JSON null**. | A nil Go map is marshaled as an **empty JSON object**. | [NilSlicesAndMaps](/diff_test.go#:~:text=TestNilSlicesAndMaps) | +| A Go array may be unmarshaled from a **JSON array of any length**. | A Go array must be unmarshaled from a **JSON array of the same length**. | [Arrays](/diff_test.go#:~:text=Arrays) | +| A Go byte array is represented as a **JSON array of JSON numbers**. | A Go byte array is represented as a **Base64-encoded JSON string**. | [ByteArrays](/diff_test.go#:~:text=TestByteArrays) | +| `MarshalJSON` and `UnmarshalJSON` methods declared on a pointer receiver are **inconsistently called**. | `MarshalJSON` and `UnmarshalJSON` methods declared on a pointer receiver are **consistently called**. | [PointerReceiver](/diff_test.go#:~:text=TestPointerReceiver) | +| A Go map is marshaled in a **deterministic order**. | A Go map is marshaled in a **non-deterministic order**. | [MapDeterminism](/diff_test.go#:~:text=TestMapDeterminism) | +| JSON strings are encoded **with HTML-specific characters being escaped**. | JSON strings are encoded **without any characters being escaped** (unless necessary). | [EscapeHTML](/diff_test.go#:~:text=TestEscapeHTML) | +| When marshaling, invalid UTF-8 within a Go string **are silently replaced**. | When marshaling, invalid UTF-8 within a Go string **results in an error**. | [InvalidUTF8](/diff_test.go#:~:text=TestInvalidUTF8) | +| When unmarshaling, invalid UTF-8 within a JSON string **are silently replaced**. | When unmarshaling, invalid UTF-8 within a JSON string **results in an error**. | [InvalidUTF8](/diff_test.go#:~:text=TestInvalidUTF8) | +| When marshaling, **an error does not occur** if the output JSON value contains objects with duplicate names. | When marshaling, **an error does occur** if the output JSON value contains objects with duplicate names. | [DuplicateNames](/diff_test.go#:~:text=TestDuplicateNames) | +| When unmarshaling, **an error does not occur** if the input JSON value contains objects with duplicate names. | When unmarshaling, **an error does occur** if the input JSON value contains objects with duplicate names. 
| [DuplicateNames](/diff_test.go#:~:text=TestDuplicateNames) | +| Unmarshaling a JSON null into a non-empty Go value **inconsistently clears the value or does nothing**. | Unmarshaling a JSON null into a non-empty Go value **always clears the value**. | [MergeNull](/diff_test.go#:~:text=TestMergeNull) | +| Unmarshaling a JSON value into a non-empty Go value **follows inconsistent and bizarre behavior**. | Unmarshaling a JSON value into a non-empty Go value **always merges if the input is an object, and otherwise replaces**. | [MergeComposite](/diff_test.go#:~:text=TestMergeComposite) | +| A `time.Duration` is represented as a **JSON number containing the decimal number of nanoseconds**. | A `time.Duration` is represented as a **JSON string containing the formatted duration (e.g., "1h2m3.456s")**. | [TimeDurations](/diff_test.go#:~:text=TestTimeDurations) | +| Unmarshaling a JSON number into a Go float beyond its representation **results in an error**. | Unmarshaling a JSON number into a Go float beyond its representation **uses the closest representable value (e.g., ±`math.MaxFloat`)**. | [MaxFloats](/diff_test.go#:~:text=TestMaxFloats) | +| A Go struct with only unexported fields **can be serialized**. | A Go struct with only unexported fields **cannot be serialized**. | [EmptyStructs](/diff_test.go#:~:text=TestEmptyStructs) | +| A Go struct that embeds an unexported struct type **can sometimes be serialized**. | A Go struct that embeds an unexported struct type **cannot be serialized**. | [EmbedUnexported](/diff_test.go#:~:text=TestEmbedUnexported) | + +See [diff_test.go](/diff_test.go) for details about every change. + +## Performance + +One of the goals of the v2 module is to be more performant than v1. + +Each of the charts below show the performance across +several different JSON implementations: + +* `JSONv1` is `encoding/json` at `v1.18.2` +* `JSONv2` is `github.com/go-json-experiment/json` at `v0.0.0-20220524042235-dd8be80fc4a7` +* `JSONIterator` is `github.com/json-iterator/go` at `v1.1.12` +* `SegmentJSON` is `github.com/segmentio/encoding/json` at `v0.3.5` +* `GoJSON` is `github.com/goccy/go-json` at `v0.9.7` +* `SonicJSON` is `github.com/bytedance/sonic` at `v1.3.0` + +Benchmarks were run across various datasets: + +* `CanadaGeometry` is a GeoJSON (RFC 7946) representation of Canada. + It contains many JSON arrays of arrays of two-element arrays of numbers. +* `CITMCatalog` contains many JSON objects using numeric names. +* `SyntheaFHIR` is sample JSON data from the healthcare industry. + It contains many nested JSON objects with mostly string values, + where the set of unique string values is relatively small. +* `TwitterStatus` is the JSON response from the Twitter API. + It contains a mix of all different JSON kinds, where string values + are a mix of both single-byte ASCII and multi-byte Unicode. +* `GolangSource` is a simple tree representing the Go source code. + It contains many nested JSON objects, each with the same schema. +* `StringUnicode` contains many strings with multi-byte Unicode runes. + +All of the implementations other than `JSONv1` and `JSONv2` make +extensive use of `unsafe`. As such, we expect those to generally be faster, +but at the cost of memory and type safety. `SonicJSON` goes a step even further +and uses just-in-time compilation to generate machine code specialized +for the Go type being marshaled or unmarshaled. 
+Also, `SonicJSON` does not validate JSON strings for valid UTF-8, +and so gains a notable performance boost on datasets with multi-byte Unicode. +Benchmarks are performed based on the default marshal and unmarshal behavior +of each package. Note that `JSONv2` aims to be safe and correct by default, +which may not be the most performant strategy. + +`JSONv2` has several semantic changes relative to `JSONv1` that +impacts performance: + +1. When marshaling, `JSONv2` no longer sorts the keys of a Go map. + This will improve performance. +2. When marshaling or unmarshaling, `JSONv2` always checks + to make sure JSON object names are unique. + This will hurt performance, but is more correct. +3. When marshaling or unmarshaling, `JSONv2` always + shallow copies the underlying value for a Go interface and + shallow copies the key and value for entries in a Go map. + This is done to keep the value as addressable so that `JSONv2` can + call methods and functions that operate on a pointer receiver. + This will hurt performance, but is more correct. + +All of the charts are unit-less since the values are normalized +relative to `JSONv1`, which is why `JSONv1` always has a value of 1. +A lower value is better (i.e., runs faster). + +Benchmarks were performed on an AMD Ryzen 9 5900X. + +The code for the benchmarks is located at +https://github.com/go-json-experiment/jsonbench. + +### Marshal Performance + +#### Concrete types + +![Benchmark Marshal Concrete](benchmark-marshal-concrete.png) + +* This compares marshal performance when serializing + [from concrete types](/testdata_test.go). +* The `JSONv1` implementation is close to optimal (without the use of `unsafe`). +* Relative to `JSONv1`, `JSONv2` is generally as fast or slightly faster. +* Relative to `JSONIterator`, `JSONv2` is up to 1.3x faster. +* Relative to `SegmentJSON`, `JSONv2` is up to 1.8x slower. +* Relative to `GoJSON`, `JSONv2` is up to 2.0x slower. +* Relative to `SonicJSON`, `JSONv2` is about 1.8x to 3.2x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* For `JSONv1` and `JSONv2`, marshaling from concrete types is + mostly limited by the performance of Go reflection. + +#### Interface types + +![Benchmark Marshal Interface](benchmark-marshal-interface.png) + +* This compares marshal performance when serializing from + `any`, `map[string]any`, and `[]any` types. +* Relative to `JSONv1`, `JSONv2` is about 1.5x to 4.2x faster. +* Relative to `JSONIterator`, `JSONv2` is about 1.1x to 2.4x faster. +* Relative to `SegmentJSON`, `JSONv2` is about 1.2x to 1.8x faster. +* Relative to `GoJSON`, `JSONv2` is about 1.1x to 2.5x faster. +* Relative to `SonicJSON`, `JSONv2` is up to 1.5x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* `JSONv2` is faster than the alternatives. + One advantange is because it does not sort the keys for a `map[string]any`, + while alternatives (except `SonicJSON` and `JSONIterator`) do sort the keys. + +#### RawValue types + +![Benchmark Marshal Rawvalue](benchmark-marshal-rawvalue.png) + +* This compares performance when marshaling from a `json.RawValue`. + This mostly exercises the underlying encoder and + hides the cost of Go reflection. +* Relative to `JSONv1`, `JSONv2` is about 3.5x to 7.8x faster. +* `JSONIterator` is blazingly fast because + [it does not validate whether the raw value is valid](https://go.dev/play/p/bun9IXQCKRe) + and simply copies it to the output. +* Relative to `SegmentJSON`, `JSONv2` is about 1.5x to 2.7x faster. 
+* Relative to `GoJSON`, `JSONv2` is up to 2.2x faster. +* Relative to `SonicJSON`, `JSONv2` is up to 1.5x faster. +* Aside from `JSONIterator`, `JSONv2` is generally the fastest. + +### Unmarshal Performance + +#### Concrete types + +![Benchmark Unmarshal Concrete](benchmark-unmarshal-concrete.png) + +* This compares unmarshal performance when deserializing + [into concrete types](/testdata_test.go). +* Relative to `JSONv1`, `JSONv2` is about 1.8x to 5.7x faster. +* Relative to `JSONIterator`, `JSONv2` is about 1.1x to 1.6x slower. +* Relative to `SegmentJSON`, `JSONv2` is up to 2.5x slower. +* Relative to `GoJSON`, `JSONv2` is about 1.4x to 2.1x slower. +* Relative to `SonicJSON`, `JSONv2` is up to 4.0x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* For `JSONv1` and `JSONv2`, unmarshaling into concrete types is + mostly limited by the performance of Go reflection. + +#### Interface types + +![Benchmark Unmarshal Interface](benchmark-unmarshal-interface.png) + +* This compares unmarshal performance when deserializing into + `any`, `map[string]any`, and `[]any` types. +* Relative to `JSONv1`, `JSONv2` is about 1.tx to 4.3x faster. +* Relative to `JSONIterator`, `JSONv2` is up to 1.5x faster. +* Relative to `SegmentJSON`, `JSONv2` is about 1.5 to 3.7x faster. +* Relative to `GoJSON`, `JSONv2` is up to 1.3x faster. +* Relative to `SonicJSON`, `JSONv2` is up to 1.5x slower + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* Aside from `SonicJSON`, `JSONv2` is generally just as fast + or faster than all the alternatives. + +#### RawValue types + +![Benchmark Unmarshal Rawvalue](benchmark-unmarshal-rawvalue.png) + +* This compares performance when unmarshaling into a `json.RawValue`. + This mostly exercises the underlying decoder and + hides away most of the cost of Go reflection. +* Relative to `JSONv1`, `JSONv2` is about 8.3x to 17.0x faster. +* Relative to `JSONIterator`, `JSONv2` is up to 2.0x faster. +* Relative to `SegmentJSON`, `JSONv2` is up to 1.6x faster or 1.7x slower. +* Relative to `GoJSON`, `JSONv2` is up to 1.9x faster or 2.1x slower. +* Relative to `SonicJSON`, `JSONv2` is up to 2.0x faster + (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8). +* `JSONv1` takes a + [lexical scanning approach](https://talks.golang.org/2011/lex.slide#1), + which performs a virtual function call for every byte of input. + In contrast, `JSONv2` makes heavy use of iterative and linear parsing logic + (with extra complexity to resume parsing when encountering segmented buffers). +* `JSONv2` is comparable to the alternatives that use `unsafe`. + Generally it is faster, but sometimes it is slower. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go new file mode 100644 index 000000000..e6c6216ff --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go @@ -0,0 +1,513 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "io" + "reflect" + "sync" +) + +// MarshalOptions configures how Go data is serialized as JSON data. +// The zero value is equivalent to the default marshal settings. 
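Editor's aside, not part of the vendored file: a small usage sketch of the options-based Marshal API as it exists in this vendored snapshot. The import path is the upstream home of this copy, but the public go-json-experiment module has continued to evolve, so treat both the path and the signatures here as belonging to this snapshot rather than current upstream.

package main

import (
	"fmt"

	json "github.com/go-json-experiment/json" // upstream path of this vendored snapshot
)

func main() {
	opts := json.MarshalOptions{
		StringifyNumbers: true, // encode numbers as JSON strings to preserve int64/uint64 precision
		Deterministic:    true, // equal inputs serialize to identical bytes
	}
	out, err := opts.Marshal(json.EncodeOptions{}, map[string]int64{"id": 1 << 60})
	fmt.Println(string(out), err) // expected: {"id":"1152921504606846976"} <nil>
}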
+type MarshalOptions struct { + requireKeyedLiterals + nonComparable + + // Marshalers is a list of type-specific marshalers to use. + Marshalers *Marshalers + + // StringifyNumbers specifies that numeric Go types should be serialized + // as a JSON string containing the equivalent JSON number value. + // + // According to RFC 8259, section 6, a JSON implementation may choose to + // limit the representation of a JSON number to an IEEE 754 binary64 value. + // This may cause decoders to lose precision for int64 and uint64 types. + // Escaping JSON numbers as a JSON string preserves the exact precision. + StringifyNumbers bool + + // DiscardUnknownMembers specifies that marshaling should ignore any + // JSON object members stored in Go struct fields dedicated to storing + // unknown JSON object members. + DiscardUnknownMembers bool + + // Deterministic specifies that the same input value will be serialized + // as the exact same output bytes. Different processes of + // the same program will serialize equal values to the same bytes, + // but different versions of the same program are not guaranteed + // to produce the exact same sequence of bytes. + Deterministic bool + + // formatDepth is the depth at which we respect the format flag. + formatDepth int + // format is custom formatting for the value at the specified depth. + format string +} + +// Marshal serializes a Go value as a []byte with default options. +// It is a thin wrapper over MarshalOptions.Marshal. +func Marshal(in any) (out []byte, err error) { + return MarshalOptions{}.Marshal(EncodeOptions{}, in) +} + +// MarshalFull serializes a Go value into an io.Writer with default options. +// It is a thin wrapper over MarshalOptions.MarshalFull. +func MarshalFull(out io.Writer, in any) error { + return MarshalOptions{}.MarshalFull(EncodeOptions{}, out, in) +} + +// Marshal serializes a Go value as a []byte according to the provided +// marshal and encode options. It does not terminate the output with a newline. +// See MarshalNext for details about the conversion of a Go value into JSON. +func (mo MarshalOptions) Marshal(eo EncodeOptions, in any) (out []byte, err error) { + enc := getBufferedEncoder(eo) + defer putBufferedEncoder(enc) + enc.options.omitTopLevelNewline = true + err = mo.MarshalNext(enc, in) + // TODO(https://go.dev/issue/45038): Use bytes.Clone. + return append([]byte(nil), enc.buf...), err +} + +// MarshalFull serializes a Go value into an io.Writer according to the provided +// marshal and encode options. It does not terminate the output with a newline. +// See MarshalNext for details about the conversion of a Go value into JSON. +func (mo MarshalOptions) MarshalFull(eo EncodeOptions, out io.Writer, in any) error { + enc := getStreamingEncoder(out, eo) + defer putStreamingEncoder(enc) + enc.options.omitTopLevelNewline = true + err := mo.MarshalNext(enc, in) + return err +} + +// MarshalNext encodes a Go value as the next JSON value according to +// the provided marshal options. +// +// Type-specific marshal functions and methods take precedence +// over the default representation of a value. +// Functions or methods that operate on *T are only called when encoding +// a value of type T (by taking its address) or a non-nil value of *T. +// MarshalNext ensures that a value is always addressable +// (by boxing it on the heap if necessary) so that +// these functions and methods can be consistently called. For performance, +// it is recommended that MarshalNext be passed a non-nil pointer to the value. 
+// +// The input value is encoded as JSON according the following rules: +// +// - If any type-specific functions in MarshalOptions.Marshalers match +// the value type, then those functions are called to encode the value. +// If all applicable functions return SkipFunc, +// then the value is encoded according to subsequent rules. +// +// - If the value type implements MarshalerV2, +// then the MarshalNextJSON method is called to encode the value. +// +// - If the value type implements MarshalerV1, +// then the MarshalJSON method is called to encode the value. +// +// - If the value type implements encoding.TextMarshaler, +// then the MarshalText method is called to encode the value and +// subsequently encode its result as a JSON string. +// +// - Otherwise, the value is encoded according to the value's type +// as described in detail below. +// +// Most Go types have a default JSON representation. +// Certain types support specialized formatting according to +// a format flag optionally specified in the Go struct tag +// for the struct field that contains the current value +// (see the “JSON Representation of Go structs” section for more details). +// +// The representation of each type is as follows: +// +// - A Go boolean is encoded as a JSON boolean (e.g., true or false). +// It does not support any custom format flags. +// +// - A Go string is encoded as a JSON string. +// It does not support any custom format flags. +// +// - A Go []byte or [N]byte is encoded as a JSON string containing +// the binary value encoded using RFC 4648. +// If the format is "base64" or unspecified, then this uses RFC 4648, section 4. +// If the format is "base64url", then this uses RFC 4648, section 5. +// If the format is "base32", then this uses RFC 4648, section 6. +// If the format is "base32hex", then this uses RFC 4648, section 7. +// If the format is "base16" or "hex", then this uses RFC 4648, section 8. +// If the format is "array", then the bytes value is encoded as a JSON array +// where each byte is recursively JSON-encoded as each JSON array element. +// +// - A Go integer is encoded as a JSON number without fractions or exponents. +// If MarshalOptions.StringifyNumbers is specified, then the JSON number is +// encoded within a JSON string. It does not support any custom format +// flags. +// +// - A Go float is encoded as a JSON number. +// If MarshalOptions.StringifyNumbers is specified, +// then the JSON number is encoded within a JSON string. +// If the format is "nonfinite", then NaN, +Inf, and -Inf are encoded as +// the JSON strings "NaN", "Infinity", and "-Infinity", respectively. +// Otherwise, the presence of non-finite numbers results in a SemanticError. +// +// - A Go map is encoded as a JSON object, where each Go map key and value +// is recursively encoded as a name and value pair in the JSON object. +// The Go map key must encode as a JSON string, otherwise this results +// in a SemanticError. When encoding keys, MarshalOptions.StringifyNumbers +// is automatically applied so that numeric keys encode as JSON strings. +// The Go map is traversed in a non-deterministic order. +// For deterministic encoding, consider using RawValue.Canonicalize. +// If the format is "emitnull", then a nil map is encoded as a JSON null. +// Otherwise by default, a nil map is encoded as an empty JSON object. +// +// - A Go struct is encoded as a JSON object. +// See the “JSON Representation of Go structs” section +// in the package-level documentation for more details. 
+// +// - A Go slice is encoded as a JSON array, where each Go slice element +// is recursively JSON-encoded as the elements of the JSON array. +// If the format is "emitnull", then a nil slice is encoded as a JSON null. +// Otherwise by default, a nil slice is encoded as an empty JSON array. +// +// - A Go array is encoded as a JSON array, where each Go array element +// is recursively JSON-encoded as the elements of the JSON array. +// The JSON array length is always identical to the Go array length. +// It does not support any custom format flags. +// +// - A Go pointer is encoded as a JSON null if nil, otherwise it is +// the recursively JSON-encoded representation of the underlying value. +// Format flags are forwarded to the encoding of the underlying value. +// +// - A Go interface is encoded as a JSON null if nil, otherwise it is +// the recursively JSON-encoded representation of the underlying value. +// It does not support any custom format flags. +// +// - A Go time.Time is encoded as a JSON string containing the timestamp +// formatted in RFC 3339 with nanosecond resolution. +// If the format matches one of the format constants declared +// in the time package (e.g., RFC1123), then that format is used. +// Otherwise, the format is used as-is with time.Time.Format if non-empty. +// +// - A Go time.Duration is encoded as a JSON string containing the duration +// formatted according to time.Duration.String. +// If the format is "nanos", it is encoded as a JSON number +// containing the number of nanoseconds in the duration. +// +// - All other Go types (e.g., complex numbers, channels, and functions) +// have no default representation and result in a SemanticError. +// +// JSON cannot represent cyclic data structures and +// MarshalNext does not handle them. +// Passing cyclic structures will result in an error. +func (mo MarshalOptions) MarshalNext(out *Encoder, in any) error { + v := reflect.ValueOf(in) + if !v.IsValid() || (v.Kind() == reflect.Pointer && v.IsNil()) { + return out.WriteToken(Null) + } + // Shallow copy non-pointer values to obtain an addressable value. + // It is beneficial to performance to always pass pointers to avoid this. + if v.Kind() != reflect.Pointer { + v2 := reflect.New(v.Type()) + v2.Elem().Set(v) + v = v2 + } + va := addressableValue{v.Elem()} // dereferenced pointer is always addressable + t := va.Type() + + // Lookup and call the marshal function for this type. + marshal := lookupArshaler(t).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t) + } + if err := marshal(mo, out, va); err != nil { + if !out.options.AllowDuplicateNames { + out.tokens.invalidateDisabledNamespaces() + } + return err + } + return nil +} + +// UnmarshalOptions configures how JSON data is deserialized as Go data. +// The zero value is equivalent to the default unmarshal settings. +type UnmarshalOptions struct { + requireKeyedLiterals + nonComparable + + // Unmarshalers is a list of type-specific unmarshalers to use. + Unmarshalers *Unmarshalers + + // StringifyNumbers specifies that numeric Go types can be deserialized + // from either a JSON number or a JSON string containing a JSON number + // without any surrounding whitespace. + StringifyNumbers bool + + // RejectUnknownMembers specifies that unknown members should be rejected + // when unmarshaling a JSON object, regardless of whether there is a field + // to store unknown members. + RejectUnknownMembers bool + + // formatDepth is the depth at which we respect the format flag. 
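As a quick illustration of the MarshalOptions behavior documented above, here is a minimal sketch. It assumes the vendored package could be imported under the alias json (it actually sits under an internal package path, so the import line is illustrative only), and the record type is invented for the example.

package main

import (
	"fmt"

	// Illustrative import only: the vendored copy lives under an internal path.
	json "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

type record struct {
	Name  string
	Count int64
}

func main() {
	// Default marshaling: a thin wrapper over MarshalOptions{}.Marshal.
	b, _ := json.Marshal(record{Name: "a", Count: 1 << 60})
	fmt.Println(string(b)) // {"Name":"a","Count":1152921504606846976}

	// StringifyNumbers encodes numeric Go values inside JSON strings so that
	// int64/uint64 precision survives decoders limited to IEEE 754 float64.
	mo := json.MarshalOptions{StringifyNumbers: true}
	b, _ = mo.Marshal(json.EncodeOptions{}, record{Name: "a", Count: 1 << 60})
	fmt.Println(string(b)) // {"Name":"a","Count":"1152921504606846976"}
}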
+ formatDepth int + // format is custom formatting for the value at the specified depth. + format string +} + +// Unmarshal deserializes a Go value from a []byte with default options. +// It is a thin wrapper over UnmarshalOptions.Unmarshal. +func Unmarshal(in []byte, out any) error { + return UnmarshalOptions{}.Unmarshal(DecodeOptions{}, in, out) +} + +// UnmarshalFull deserializes a Go value from an io.Reader with default options. +// It is a thin wrapper over UnmarshalOptions.UnmarshalFull. +func UnmarshalFull(in io.Reader, out any) error { + return UnmarshalOptions{}.UnmarshalFull(DecodeOptions{}, in, out) +} + +// Unmarshal deserializes a Go value from a []byte according to the +// provided unmarshal and decode options. The output must be a non-nil pointer. +// The input must be a single JSON value with optional whitespace interspersed. +// See UnmarshalNext for details about the conversion of JSON into a Go value. +func (uo UnmarshalOptions) Unmarshal(do DecodeOptions, in []byte, out any) error { + dec := getBufferedDecoder(in, do) + defer putBufferedDecoder(dec) + return uo.unmarshalFull(dec, out) +} + +// UnmarshalFull deserializes a Go value from an io.Reader according to the +// provided unmarshal and decode options. The output must be a non-nil pointer. +// The input must be a single JSON value with optional whitespace interspersed. +// It consumes the entirety of io.Reader until io.EOF is encountered. +// See UnmarshalNext for details about the conversion of JSON into a Go value. +func (uo UnmarshalOptions) UnmarshalFull(do DecodeOptions, in io.Reader, out any) error { + dec := getStreamingDecoder(in, do) + defer putStreamingDecoder(dec) + return uo.unmarshalFull(dec, out) +} +func (uo UnmarshalOptions) unmarshalFull(in *Decoder, out any) error { + switch err := uo.UnmarshalNext(in, out); err { + case nil: + return in.checkEOF() + case io.EOF: + return io.ErrUnexpectedEOF + default: + return err + } +} + +// UnmarshalNext decodes the next JSON value into a Go value according to +// the provided unmarshal options. The output must be a non-nil pointer. +// +// Type-specific unmarshal functions and methods take precedence +// over the default representation of a value. +// Functions or methods that operate on *T are only called when decoding +// a value of type T (by taking its address) or a non-nil value of *T. +// UnmarshalNext ensures that a value is always addressable +// (by boxing it on the heap if necessary) so that +// these functions and methods can be consistently called. +// +// The input is decoded into the output according the following rules: +// +// - If any type-specific functions in UnmarshalOptions.Unmarshalers match +// the value type, then those functions are called to decode the JSON +// value. If all applicable functions return SkipFunc, +// then the input is decoded according to subsequent rules. +// +// - If the value type implements UnmarshalerV2, +// then the UnmarshalNextJSON method is called to decode the JSON value. +// +// - If the value type implements UnmarshalerV1, +// then the UnmarshalJSON method is called to decode the JSON value. +// +// - If the value type implements encoding.TextUnmarshaler, +// then the input is decoded as a JSON string and +// the UnmarshalText method is called with the decoded string value. +// This fails with a SemanticError if the input is not a JSON string. +// +// - Otherwise, the JSON value is decoded according to the value's type +// as described in detail below. 
+// +// Most Go types have a default JSON representation. +// Certain types support specialized formatting according to +// a format flag optionally specified in the Go struct tag +// for the struct field that contains the current value +// (see the “JSON Representation of Go structs” section for more details). +// A JSON null may be decoded into every supported Go value where +// it is equivalent to storing the zero value of the Go value. +// If the input JSON kind is not handled by the current Go value type, +// then this fails with a SemanticError. Unless otherwise specified, +// the decoded value replaces any pre-existing value. +// +// The representation of each type is as follows: +// +// - A Go boolean is decoded from a JSON boolean (e.g., true or false). +// It does not support any custom format flags. +// +// - A Go string is decoded from a JSON string. +// It does not support any custom format flags. +// +// - A Go []byte or [N]byte is decoded from a JSON string +// containing the binary value encoded using RFC 4648. +// If the format is "base64" or unspecified, then this uses RFC 4648, section 4. +// If the format is "base64url", then this uses RFC 4648, section 5. +// If the format is "base32", then this uses RFC 4648, section 6. +// If the format is "base32hex", then this uses RFC 4648, section 7. +// If the format is "base16" or "hex", then this uses RFC 4648, section 8. +// If the format is "array", then the Go slice or array is decoded from a +// JSON array where each JSON element is recursively decoded for each byte. +// When decoding into a non-nil []byte, the slice length is reset to zero +// and the decoded input is appended to it. +// When decoding into a [N]byte, the input must decode to exactly N bytes, +// otherwise it fails with a SemanticError. +// +// - A Go integer is decoded from a JSON number. +// It may also be decoded from a JSON string containing a JSON number +// if UnmarshalOptions.StringifyNumbers is specified. +// It fails with a SemanticError if the JSON number +// has a fractional or exponent component. +// It also fails if it overflows the representation of the Go integer type. +// It does not support any custom format flags. +// +// - A Go float is decoded from a JSON number. +// It may also be decoded from a JSON string containing a JSON number +// if UnmarshalOptions.StringifyNumbers is specified. +// The JSON number is parsed as the closest representable Go float value. +// If the format is "nonfinite", then the JSON strings +// "NaN", "Infinity", and "-Infinity" are decoded as NaN, +Inf, and -Inf. +// Otherwise, the presence of such strings results in a SemanticError. +// +// - A Go map is decoded from a JSON object, +// where each JSON object name and value pair is recursively decoded +// as the Go map key and value. When decoding keys, +// UnmarshalOptions.StringifyNumbers is automatically applied so that +// numeric keys can decode from JSON strings. Maps are not cleared. +// If the Go map is nil, then a new map is allocated to decode into. +// If the decoded key matches an existing Go map entry, the entry value +// is reused by decoding the JSON object value into it. +// The only supported format is "emitnull" and has no effect when decoding. +// +// - A Go struct is decoded from a JSON object. +// See the “JSON Representation of Go structs” section +// in the package-level documentation for more details. +// +// - A Go slice is decoded from a JSON array, where each JSON element +// is recursively decoded and appended to the Go slice. 
+// Before appending into a Go slice, a new slice is allocated if it is nil, +// otherwise the slice length is reset to zero. +// The only supported format is "emitnull" and has no effect when decoding. +// +// - A Go array is decoded from a JSON array, where each JSON array element +// is recursively decoded as each corresponding Go array element. +// Each Go array element is zeroed before decoding into it. +// It fails with a SemanticError if the JSON array does not contain +// the exact same number of elements as the Go array. +// It does not support any custom format flags. +// +// - A Go pointer is decoded based on the JSON kind and underlying Go type. +// If the input is a JSON null, then this stores a nil pointer. +// Otherwise, it allocates a new underlying value if the pointer is nil, +// and recursively JSON decodes into the underlying value. +// Format flags are forwarded to the decoding of the underlying type. +// +// - A Go interface is decoded based on the JSON kind and underlying Go type. +// If the input is a JSON null, then this stores a nil interface value. +// Otherwise, a nil interface value of an empty interface type is initialized +// with a zero Go bool, string, float64, map[string]any, or []any if the +// input is a JSON boolean, string, number, object, or array, respectively. +// If the interface value is still nil, then this fails with a SemanticError +// since decoding could not determine an appropriate Go type to decode into. +// For example, unmarshaling into a nil io.Reader fails since +// there is no concrete type to populate the interface value with. +// Otherwise an underlying value exists and it recursively decodes +// the JSON input into it. It does not support any custom format flags. +// +// - A Go time.Time is decoded from a JSON string containing the time +// formatted in RFC 3339 with nanosecond resolution. +// If the format matches one of the format constants declared in +// the time package (e.g., RFC1123), then that format is used for parsing. +// Otherwise, the format is used as-is with time.Time.Parse if non-empty. +// +// - A Go time.Duration is decoded from a JSON string by +// passing the decoded string to time.ParseDuration. +// If the format is "nanos", it is instead decoded from a JSON number +// containing the number of nanoseconds in the duration. +// +// - All other Go types (e.g., complex numbers, channels, and functions) +// have no default representation and result in a SemanticError. +// +// In general, unmarshaling follows merge semantics (similar to RFC 7396) +// where the decoded Go value replaces the destination value +// for any JSON kind other than an object. +// For JSON objects, the input object is merged into the destination value +// where matching object members recursively apply merge semantics. +func (uo UnmarshalOptions) UnmarshalNext(in *Decoder, out any) error { + v := reflect.ValueOf(out) + if !v.IsValid() || v.Kind() != reflect.Pointer || v.IsNil() { + var t reflect.Type + if v.IsValid() { + t = v.Type() + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + } + err := errors.New("value must be passed as a non-nil pointer reference") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + va := addressableValue{v.Elem()} // dereferenced pointer is always addressable + t := va.Type() + + // Lookup and call the unmarshal function for this type. 
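The unmarshal side can be sketched the same way, with the same caveats: the json import alias and the field names below are illustrative. The sketch exercises the non-nil-pointer requirement and the RejectUnknownMembers option described above.

package main

import (
	"fmt"

	// Illustrative import only: the vendored copy lives under an internal path.
	json "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	// Unmarshal requires a non-nil pointer, per UnmarshalNext above.
	var cfg struct {
		Port int
		Tags []string
	}
	if err := json.Unmarshal([]byte(`{"Port":8080,"Tags":["a","b"]}`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Port, cfg.Tags) // 8080 [a b]

	// RejectUnknownMembers turns unmatched object members into an error
	// instead of silently skipping them.
	uo := json.UnmarshalOptions{RejectUnknownMembers: true}
	err := uo.Unmarshal(json.DecodeOptions{}, []byte(`{"Port":1,"Debug":true}`), &cfg)
	fmt.Println(err != nil) // true: "Debug" has no corresponding field
}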
+ unmarshal := lookupArshaler(t).unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t) + } + if err := unmarshal(uo, in, va); err != nil { + if !in.options.AllowDuplicateNames { + in.tokens.invalidateDisabledNamespaces() + } + return err + } + return nil +} + +// addressableValue is a reflect.Value that is guaranteed to be addressable +// such that calling the Addr and Set methods do not panic. +// +// There is no compile magic that enforces this property, +// but rather the need to construct this type makes it easier to examine each +// construction site to ensure that this property is upheld. +type addressableValue struct{ reflect.Value } + +// newAddressableValue constructs a new addressable value of type t. +func newAddressableValue(t reflect.Type) addressableValue { + return addressableValue{reflect.New(t).Elem()} +} + +// All marshal and unmarshal behavior is implemented using these signatures. +type ( + marshaler = func(MarshalOptions, *Encoder, addressableValue) error + unmarshaler = func(UnmarshalOptions, *Decoder, addressableValue) error +) + +type arshaler struct { + marshal marshaler + unmarshal unmarshaler + nonDefault bool +} + +var lookupArshalerCache sync.Map // map[reflect.Type]*arshaler + +func lookupArshaler(t reflect.Type) *arshaler { + if v, ok := lookupArshalerCache.Load(t); ok { + return v.(*arshaler) + } + + fncs := makeDefaultArshaler(t) + fncs = makeMethodArshaler(fncs, t) + fncs = makeTimeArshaler(fncs, t) + + // Use the last stored so that duplicate arshalers can be garbage collected. + v, _ := lookupArshalerCache.LoadOrStore(t, fncs) + return v.(*arshaler) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go new file mode 100644 index 000000000..c62b1f320 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go @@ -0,0 +1,238 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import "reflect" + +// This files contains an optimized marshal and unmarshal implementation +// for the any type. This type is often used when the Go program has +// no knowledge of the JSON schema. This is a common enough occurrence +// to justify the complexity of adding logic for this. 
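Per the file comment above, values decoded into any are built only from bool, string, float64, map[string]any, and []any. A small hedged sketch (same illustrative import alias as before):

package main

import (
	"fmt"

	// Illustrative import only: the vendored copy lives under an internal path.
	json "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	var v any
	if err := json.Unmarshal([]byte(`{"n": 1, "ok": true, "tags": ["a"]}`), &v); err != nil {
		panic(err)
	}
	// JSON objects, arrays, numbers, and booleans decode into
	// map[string]any, []any, float64, and bool respectively.
	m := v.(map[string]any)
	fmt.Printf("%T %T %T\n", m["n"], m["ok"], m["tags"]) // float64 bool []interface {}
}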
+ +func marshalValueAny(mo MarshalOptions, enc *Encoder, val any) error { + switch val := val.(type) { + case nil: + return enc.WriteToken(Null) + case bool: + return enc.WriteToken(Bool(val)) + case string: + return enc.WriteToken(String(val)) + case float64: + return enc.WriteToken(Float(val)) + case map[string]any: + return marshalObjectAny(mo, enc, val) + case []any: + return marshalArrayAny(mo, enc, val) + default: + v := newAddressableValue(reflect.TypeOf(val)) + v.Set(reflect.ValueOf(val)) + marshal := lookupArshaler(v.Type()).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, v.Type()) + } + return marshal(mo, enc, v) + } +} + +func unmarshalValueAny(uo UnmarshalOptions, dec *Decoder) (any, error) { + switch k := dec.PeekKind(); k { + case '{': + return unmarshalObjectAny(uo, dec) + case '[': + return unmarshalArrayAny(uo, dec) + default: + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return nil, err + } + switch val.Kind() { + case 'n': + return nil, nil + case 'f': + return false, nil + case 't': + return true, nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if dec.stringCache == nil { + dec.stringCache = new(stringCache) + } + return dec.stringCache.make(val), nil + case '0': + fv, _ := parseFloat(val, 64) // ignore error since readValue guarantees val is valid + return fv, nil + default: + panic("BUG: invalid kind: " + k.String()) + } + } +} + +func marshalObjectAny(mo MarshalOptions, enc *Encoder, obj map[string]any) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + v := reflect.ValueOf(obj) + if err := enc.seenPointers.visit(v); err != nil { + return err + } + defer enc.seenPointers.leave(v) + } + + // Optimize for marshaling an empty map without any preceding whitespace. + if len(obj) == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '{') + enc.buf = append(enc.buf, "{}"...) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + // A Go map guarantees that each entry has a unique key + // The only possibility of duplicates is due to invalid UTF-8. + if !enc.options.AllowInvalidUTF8 { + enc.tokens.last.disableNamespace() + } + if !mo.Deterministic || len(obj) <= 1 { + for name, val := range obj { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } + } + } else { + names := getStrings(len(obj)) + var i int + for name := range obj { + (*names)[i] = name + i++ + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, obj[name]); err != nil { + return err + } + } + putStrings(names) + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil +} + +func unmarshalObjectAny(uo UnmarshalOptions, dec *Decoder) (map[string]any, error) { + tok, err := dec.ReadToken() + if err != nil { + return nil, err + } + k := tok.Kind() + switch k { + case 'n': + return nil, nil + case '{': + obj := make(map[string]any) + // A Go map guarantees that each entry has a unique key + // The only possibility of duplicates is due to invalid UTF-8. 
+ if !dec.options.AllowInvalidUTF8 { + dec.tokens.last.disableNamespace() + } + for dec.PeekKind() != '}' { + tok, err := dec.ReadToken() + if err != nil { + return obj, err + } + name := tok.String() + + // Manually check for duplicate names. + if _, ok := obj[name]; ok { + name := dec.previousBuffer() + err := &SyntacticError{str: "duplicate name " + string(name) + " in object"} + return obj, err.withOffset(dec.InputOffset() - int64(len(name))) + } + + val, err := unmarshalValueAny(uo, dec) + obj[name] = val + if err != nil { + return obj, err + } + } + if _, err := dec.ReadToken(); err != nil { + return obj, err + } + return obj, nil + } + return nil, &SemanticError{action: "unmarshal", JSONKind: k, GoType: mapStringAnyType} +} + +func marshalArrayAny(mo MarshalOptions, enc *Encoder, arr []any) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + v := reflect.ValueOf(arr) + if err := enc.seenPointers.visit(v); err != nil { + return err + } + defer enc.seenPointers.leave(v) + } + + // Optimize for marshaling an empty slice without any preceding whitespace. + if len(arr) == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '[') + enc.buf = append(enc.buf, "[]"...) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + for _, val := range arr { + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil +} + +func unmarshalArrayAny(uo UnmarshalOptions, dec *Decoder) ([]any, error) { + tok, err := dec.ReadToken() + if err != nil { + return nil, err + } + k := tok.Kind() + switch k { + case 'n': + return nil, nil + case '[': + arr := []any{} + for dec.PeekKind() != ']' { + val, err := unmarshalValueAny(uo, dec) + arr = append(arr, val) + if err != nil { + return arr, err + } + } + if _, err := dec.ReadToken(); err != nil { + return arr, err + } + return arr, nil + } + return nil, &SemanticError{action: "unmarshal", JSONKind: k, GoType: sliceAnyType} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go new file mode 100644 index 000000000..fd26eba35 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go @@ -0,0 +1,1485 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "sync" +) + +// optimizeCommon specifies whether to use optimizations targeted for certain +// common patterns, rather than using the slower, but more general logic. +// All tests should pass regardless of whether this is true or not. +const optimizeCommon = true + +var ( + // Most natural Go type that correspond with each JSON type. 
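The duplicate-name check above is observable from the outside. A hedged sketch (illustrative import alias; the comment paraphrases the SyntacticError constructed above):

package main

import (
	"fmt"

	// Illustrative import only: the vendored copy lives under an internal path.
	json "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	// Duplicate object names are rejected by default, as in unmarshalObjectAny
	// above; setting AllowDuplicateNames on the decode options relaxes this.
	var v any
	err := json.Unmarshal([]byte(`{"a":1,"a":2}`), &v)
	fmt.Println(err) // reports something like: duplicate name "a" in object
}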
+ anyType = reflect.TypeOf((*any)(nil)).Elem() // JSON value + boolType = reflect.TypeOf((*bool)(nil)).Elem() // JSON bool + stringType = reflect.TypeOf((*string)(nil)).Elem() // JSON string + float64Type = reflect.TypeOf((*float64)(nil)).Elem() // JSON number + mapStringAnyType = reflect.TypeOf((*map[string]any)(nil)).Elem() // JSON object + sliceAnyType = reflect.TypeOf((*[]any)(nil)).Elem() // JSON array + + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + emptyStructType = reflect.TypeOf((*struct{})(nil)).Elem() +) + +const startDetectingCyclesAfter = 1000 + +type seenPointers map[typedPointer]struct{} + +type typedPointer struct { + typ reflect.Type + ptr any // always stores unsafe.Pointer, but avoids depending on unsafe +} + +// visit visits pointer p of type t, reporting an error if seen before. +// If successfully visited, then the caller must eventually call leave. +func (m *seenPointers) visit(v reflect.Value) error { + p := typedPointer{v.Type(), v.UnsafePointer()} + if _, ok := (*m)[p]; ok { + return &SemanticError{action: "marshal", GoType: p.typ, Err: errors.New("encountered a cycle")} + } + if *m == nil { + *m = make(map[typedPointer]struct{}) + } + (*m)[p] = struct{}{} + return nil +} +func (m *seenPointers) leave(v reflect.Value) { + p := typedPointer{v.Type(), v.UnsafePointer()} + delete(*m, p) +} + +func makeDefaultArshaler(t reflect.Type) *arshaler { + switch t.Kind() { + case reflect.Bool: + return makeBoolArshaler(t) + case reflect.String: + return makeStringArshaler(t) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return makeIntArshaler(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return makeUintArshaler(t) + case reflect.Float32, reflect.Float64: + return makeFloatArshaler(t) + case reflect.Map: + return makeMapArshaler(t) + case reflect.Struct: + return makeStructArshaler(t) + case reflect.Slice: + fncs := makeSliceArshaler(t) + if t.AssignableTo(bytesType) { + return makeBytesArshaler(t, fncs) + } + return fncs + case reflect.Array: + fncs := makeArrayArshaler(t) + if reflect.SliceOf(t.Elem()).AssignableTo(bytesType) { + return makeBytesArshaler(t, fncs) + } + return fncs + case reflect.Pointer: + return makePointerArshaler(t) + case reflect.Interface: + return makeInterfaceArshaler(t) + default: + return makeInvalidArshaler(t) + } +} + +func makeBoolArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace. + if optimizeCommon && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, 't') + if va.Bool() { + enc.buf = append(enc.buf, "true"...) + } else { + enc.buf = append(enc.buf, "false"...) 
+ } + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + return enc.WriteToken(Bool(va.Bool())) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.SetBool(false) + return nil + case 't', 'f': + va.SetBool(tok.Bool()) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeStringArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + return enc.WriteToken(String(va.String())) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetString("") + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if dec.stringCache == nil { + dec.stringCache = new(stringCache) + } + str := dec.stringCache.make(val) + va.SetString(str) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +var ( + encodeBase16 = func(dst, src []byte) { hex.Encode(dst, src) } + encodeBase32 = base32.StdEncoding.Encode + encodeBase32Hex = base32.HexEncoding.Encode + encodeBase64 = base64.StdEncoding.Encode + encodeBase64URL = base64.URLEncoding.Encode + encodedLenBase16 = hex.EncodedLen + encodedLenBase32 = base32.StdEncoding.EncodedLen + encodedLenBase32Hex = base32.HexEncoding.EncodedLen + encodedLenBase64 = base64.StdEncoding.EncodedLen + encodedLenBase64URL = base64.URLEncoding.EncodedLen + decodeBase16 = hex.Decode + decodeBase32 = base32.StdEncoding.Decode + decodeBase32Hex = base32.HexEncoding.Decode + decodeBase64 = base64.StdEncoding.Decode + decodeBase64URL = base64.URLEncoding.Decode + decodedLenBase16 = hex.DecodedLen + decodedLenBase32 = base32.StdEncoding.WithPadding(base32.NoPadding).DecodedLen + decodedLenBase32Hex = base32.HexEncoding.WithPadding(base32.NoPadding).DecodedLen + decodedLenBase64 = base64.StdEncoding.WithPadding(base64.NoPadding).DecodedLen + decodedLenBase64URL = base64.URLEncoding.WithPadding(base64.NoPadding).DecodedLen +) + +func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { + // NOTE: This handles both []byte and [N]byte. 
+ marshalDefault := fncs.marshal + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + encode, encodedLen := encodeBase64, encodedLenBase64 + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + switch mo.format { + case "base64": + encode, encodedLen = encodeBase64, encodedLenBase64 + case "base64url": + encode, encodedLen = encodeBase64URL, encodedLenBase64URL + case "base32": + encode, encodedLen = encodeBase32, encodedLenBase32 + case "base32hex": + encode, encodedLen = encodeBase32Hex, encodedLenBase32Hex + case "base16", "hex": + encode, encodedLen = encodeBase16, encodedLenBase16 + case "array": + mo.format = "" + return marshalDefault(mo, enc, va) + default: + return newInvalidFormatError("marshal", t, mo.format) + } + } + val := enc.UnusedBuffer() + b := va.Bytes() + n := len(`"`) + encodedLen(len(b)) + len(`"`) + if cap(val) < n { + val = make([]byte, n) + } else { + val = val[:n] + } + val[0] = '"' + encode(val[len(`"`):len(val)-len(`"`)], b) + val[len(val)-1] = '"' + return enc.WriteValue(val) + } + unmarshalDefault := fncs.unmarshal + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + decode, decodedLen, encodedLen := decodeBase64, decodedLenBase64, encodedLenBase64 + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + switch uo.format { + case "base64": + decode, decodedLen, encodedLen = decodeBase64, decodedLenBase64, encodedLenBase64 + case "base64url": + decode, decodedLen, encodedLen = decodeBase64URL, decodedLenBase64URL, encodedLenBase64URL + case "base32": + decode, decodedLen, encodedLen = decodeBase32, decodedLenBase32, encodedLenBase32 + case "base32hex": + decode, decodedLen, encodedLen = decodeBase32Hex, decodedLenBase32Hex, encodedLenBase32Hex + case "base16", "hex": + decode, decodedLen, encodedLen = decodeBase16, decodedLenBase16, encodedLenBase16 + case "array": + uo.format = "" + return unmarshalDefault(uo, dec, va) + default: + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + + // For base64 and base32, decodedLen computes the maximum output size + // when given the original input size. To compute the exact size, + // adjust the input size by excluding trailing padding characters. + // This is unnecessary for base16, but also harmless. + n := len(val) + for n > 0 && val[n-1] == '=' { + n-- + } + n = decodedLen(n) + b := va.Bytes() + if va.Kind() == reflect.Array { + if n != len(b) { + err := fmt.Errorf("decoded base64 length of %d mismatches array length of %d", n, len(b)) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + } else { + if b == nil || cap(b) < n { + b = make([]byte, n) + } else { + b = b[:n] + } + } + n2, err := decode(b, val) + if err == nil && len(val) != encodedLen(n2) { + // TODO(https://go.dev/issue/53845): RFC 4648, section 3.3, + // specifies that non-alphabet characters must be rejected. + // Unfortunately, the "base32" and "base64" packages allow + // '\r' and '\n' characters by default. 
+ err = errors.New("illegal data at input byte " + strconv.Itoa(bytes.IndexAny(val, "\r\n"))) + } + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + if va.Kind() == reflect.Slice { + va.SetBytes(b) + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return fncs +} + +func makeIntArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace or string escaping. + if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = strconv.AppendInt(enc.buf, va.Int(), 10) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + x := math.Float64frombits(uint64(va.Int())) + return enc.writeNumber(x, rawIntNumber, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetInt(0) + return nil + case '"': + if !uo.StringifyNumbers { + break + } + val = unescapeStringMayCopy(val, flags.isVerbatim()) + fallthrough + case '0': + var negOffset int + neg := val[0] == '-' + if neg { + negOffset = 1 + } + n, ok := parseDecUint(val[negOffset:]) + maxInt := uint64(1) << (bits - 1) + overflow := (neg && n > maxInt) || (!neg && n > maxInt-1) + if !ok { + if n != math.MaxUint64 { + err := fmt.Errorf("cannot parse %q as signed integer: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + overflow = true + } + if overflow { + err := fmt.Errorf("cannot parse %q as signed integer: %w", val, strconv.ErrRange) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + if neg { + va.SetInt(int64(-n)) + } else { + va.SetInt(int64(+n)) + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeUintArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace or string escaping. 
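makeBytesArshaler above gives []byte its default representation as RFC 4648 base64 text in both directions. A hedged sketch (illustrative import alias, made-up blob type):

package main

import (
	"fmt"

	// Illustrative import only: the vendored copy lives under an internal path.
	json "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	type blob struct {
		Data []byte
	}
	// []byte marshals to a base64 JSON string by default.
	b, _ := json.Marshal(blob{Data: []byte("hello")})
	fmt.Println(string(b)) // {"Data":"aGVsbG8="}

	// And the same string unmarshals back into the byte slice.
	var out blob
	_ = json.Unmarshal([]byte(`{"Data":"aGVsbG8="}`), &out)
	fmt.Println(string(out.Data)) // hello
}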
+ if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = strconv.AppendUint(enc.buf, va.Uint(), 10) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + x := math.Float64frombits(va.Uint()) + return enc.writeNumber(x, rawUintNumber, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetUint(0) + return nil + case '"': + if !uo.StringifyNumbers { + break + } + val = unescapeStringMayCopy(val, flags.isVerbatim()) + fallthrough + case '0': + n, ok := parseDecUint(val) + maxUint := uint64(1) << bits + overflow := n > maxUint-1 + if !ok { + if n != math.MaxUint64 { + err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + overflow = true + } + if overflow { + err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrRange) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + va.SetUint(n) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeFloatArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + var allowNonFinite bool + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "nonfinite" { + allowNonFinite = true + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + fv := va.Float() + if math.IsNaN(fv) || math.IsInf(fv, 0) { + if !allowNonFinite { + err := fmt.Errorf("invalid value: %v", fv) + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteToken(Float(fv)) + } + + // Optimize for marshaling without preceding whitespace or string escaping. 
+ if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = appendNumber(enc.buf, fv, bits) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + return enc.writeNumber(fv, bits, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + var allowNonFinite bool + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "nonfinite" { + allowNonFinite = true + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetFloat(0) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if allowNonFinite { + switch string(val) { + case "NaN": + va.SetFloat(math.NaN()) + return nil + case "Infinity": + va.SetFloat(math.Inf(+1)) + return nil + case "-Infinity": + va.SetFloat(math.Inf(-1)) + return nil + } + } + if !uo.StringifyNumbers { + break + } + if n, err := consumeNumber(val); n != len(val) || err != nil { + err := fmt.Errorf("cannot parse %q as JSON number: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + fallthrough + case '0': + // NOTE: Floating-point parsing is by nature a lossy operation. + // We never report an overflow condition since we can always + // round the input to the closest representable finite value. + // For extremely large numbers, the closest value is ±MaxFloat. + fv, _ := parseFloat(val, bits) + va.SetFloat(fv) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeMapArshaler(t reflect.Type) *arshaler { + // NOTE: The logic below disables namespaces for tracking duplicate names + // when handling map keys with a unique representation. + + // NOTE: Values retrieved from a map are not addressable, + // so we shallow copy the values to make them addressable and + // store them back into the map afterwards. + + var fncs arshaler + var ( + once sync.Once + keyFncs *arshaler + valFncs *arshaler + ) + init := func() { + keyFncs = lookupArshaler(t.Key()) + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "emitnull" { + if va.IsNil() { + return enc.WriteToken(Null) + } + mo.format = "" + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + // Optimize for marshaling an empty map without any preceding whitespace. + n := va.Len() + if optimizeCommon && n == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '{') + enc.buf = append(enc.buf, "{}"...) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + once.Do(init) + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + if n > 0 { + // Handle maps with numeric key types by stringifying them. 
+ mko := mo + mko.StringifyNumbers = true + + nonDefaultKey := keyFncs.nonDefault + marshalKey := keyFncs.marshal + marshalVal := valFncs.marshal + if mo.Marshalers != nil { + var ok bool + marshalKey, ok = mo.Marshalers.lookup(marshalKey, t.Key()) + marshalVal, _ = mo.Marshalers.lookup(marshalVal, t.Elem()) + nonDefaultKey = nonDefaultKey || ok + } + k := newAddressableValue(t.Key()) + v := newAddressableValue(t.Elem()) + + // A Go map guarantees that each entry has a unique key. + // As such, disable the expensive duplicate name check if we know + // that every Go key will serialize as a unique JSON string. + if !nonDefaultKey && mapKeyWithUniqueRepresentation(k.Kind(), enc.options.AllowInvalidUTF8) { + enc.tokens.last.disableNamespace() + } + + switch { + case !mo.Deterministic || n <= 1: + for iter := va.Value.MapRange(); iter.Next(); { + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + v.SetIterValue(iter) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + case !nonDefaultKey && t.Key().Kind() == reflect.String: + names := getStrings(n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + k.SetIterKey(iter) + (*names)[i] = k.String() + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. + k.SetString(name) + v.Set(va.MapIndex(k.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + putStrings(names) + default: + type member struct { + name string // unquoted name + key addressableValue + } + members := make([]member, n) + keys := reflect.MakeSlice(reflect.SliceOf(t.Key()), n, n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + // Marshal the member name. + k := addressableValue{keys.Index(i)} // indexed slice element is always addressable + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + name := enc.unwriteOnlyObjectMemberName() + members[i] = member{name, k} + } + // TODO: If AllowDuplicateNames is enabled, then sort according + // to reflect.Value as well if the names are equal. + // See internal/fmtsort. + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Slice(members, func(i, j int) bool { + return lessUTF16(members[i].name, members[j].name) + }) + for _, member := range members { + if err := enc.WriteToken(String(member.name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. 
+ v.Set(va.MapIndex(member.key.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + } + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "emitnull" { + uo.format = "" // only relevant for marshaling + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '{': + once.Do(init) + if va.IsNil() { + va.Set(reflect.MakeMap(t)) + } + + // Handle maps with numeric key types by stringifying them. + uko := uo + uko.StringifyNumbers = true + + nonDefaultKey := keyFncs.nonDefault + unmarshalKey := keyFncs.unmarshal + unmarshalVal := valFncs.unmarshal + if uo.Unmarshalers != nil { + var ok bool + unmarshalKey, ok = uo.Unmarshalers.lookup(unmarshalKey, t.Key()) + unmarshalVal, _ = uo.Unmarshalers.lookup(unmarshalVal, t.Elem()) + nonDefaultKey = nonDefaultKey || ok + } + k := newAddressableValue(t.Key()) + v := newAddressableValue(t.Elem()) + + // Manually check for duplicate entries by virtue of whether the + // unmarshaled key already exists in the destination Go map. + // Consequently, syntactically different names (e.g., "0" and "-0") + // will be rejected as duplicates since they semantically refer + // to the same Go value. This is an unusual interaction + // between syntax and semantics, but is more correct. + if !nonDefaultKey && mapKeyWithUniqueRepresentation(k.Kind(), dec.options.AllowInvalidUTF8) { + dec.tokens.last.disableNamespace() + } + + // In the rare case where the map is not already empty, + // then we need to manually track which keys we already saw + // since existing presence alone is insufficient to indicate + // whether the input had a duplicate name. + var seen reflect.Value + if !dec.options.AllowDuplicateNames && va.Len() > 0 { + seen = reflect.MakeMap(reflect.MapOf(k.Type(), emptyStructType)) + } + + for dec.PeekKind() != '}' { + k.Set(reflect.Zero(t.Key())) + if err := unmarshalKey(uko, dec, k); err != nil { + return err + } + if k.Kind() == reflect.Interface && !k.IsNil() && !k.Elem().Type().Comparable() { + err := fmt.Errorf("invalid incomparable key type %v", k.Elem().Type()) + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + + if v2 := va.MapIndex(k.Value); v2.IsValid() { + if !dec.options.AllowDuplicateNames && (!seen.IsValid() || seen.MapIndex(k.Value).IsValid()) { + // TODO: Unread the object name. + name := dec.previousBuffer() + err := &SyntacticError{str: "duplicate name " + string(name) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(name))) + } + v.Set(v2) + } else { + v.Set(reflect.Zero(v.Type())) + } + err := unmarshalVal(uo, dec, v) + va.SetMapIndex(k.Value, v.Value) + if seen.IsValid() { + seen.SetMapIndex(k.Value, reflect.Zero(emptyStructType)) + } + if err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +// mapKeyWithUniqueRepresentation reports whether all possible values of k +// marshal to a different JSON value, and whether all possible JSON values +// that can unmarshal into k unmarshal to different Go values. 
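The map arshaler above always stringifies numeric keys and, when MarshalOptions.Deterministic is set, sorts object member names. A hedged sketch (illustrative import alias):

package main

import (
	"fmt"

	// Illustrative import only: the vendored copy lives under an internal path.
	json "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	m := map[int]string{2: "b", 1: "a", 10: "c"}
	// Map keys are marshaled with StringifyNumbers applied, so numeric keys
	// become JSON strings; Deterministic sorts the resulting member names
	// (as strings, so "10" sorts before "2").
	mo := json.MarshalOptions{Deterministic: true}
	b, _ := mo.Marshal(json.EncodeOptions{}, m)
	fmt.Println(string(b)) // {"1":"a","10":"c","2":"b"}
}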
+// In other words, the representation must be a bijective. +func mapKeyWithUniqueRepresentation(k reflect.Kind, allowInvalidUTF8 bool) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.String: + // For strings, we have to be careful since names with invalid UTF-8 + // maybe unescape to the same Go string value. + return !allowInvalidUTF8 + default: + // Floating-point kinds are not listed above since NaNs + // can appear multiple times and all serialize as "NaN". + return false + } +} + +func makeStructArshaler(t reflect.Type) *arshaler { + // NOTE: The logic below disables namespaces for tracking duplicate names + // and does the tracking locally with an efficient bit-set based on which + // Go struct fields were seen. + + var fncs arshaler + var ( + once sync.Once + fields structFields + errInit *SemanticError + ) + init := func() { + fields, errInit = makeStructFields(t) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + once.Do(init) + if errInit != nil { + err := *errInit // shallow copy SemanticError + err.action = "marshal" + return &err + } + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + var seenIdxs uintSet + prevIdx := -1 + enc.tokens.last.disableNamespace() // we manually ensure unique names below + for i := range fields.flattened { + f := &fields.flattened[i] + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], false) + if !v.IsValid() { + continue // implies a nil inlined field + } + } + + // OmitZero skips the field if the Go value is zero, + // which we can determine up front without calling the marshaler. + if f.omitzero && ((f.isZero == nil && v.IsZero()) || (f.isZero != nil && f.isZero(v))) { + continue + } + + marshal := f.fncs.marshal + nonDefault := f.fncs.nonDefault + if mo.Marshalers != nil { + var ok bool + marshal, ok = mo.Marshalers.lookup(marshal, f.typ) + nonDefault = nonDefault || ok + } + + // OmitEmpty skips the field if the marshaled JSON value is empty, + // which we can know up front if there are no custom marshalers, + // otherwise we must marshal the value and unwrite it if empty. + if f.omitempty && !nonDefault && f.isEmpty != nil && f.isEmpty(v) { + continue // fast path for omitempty + } + + // Write the object member name. + // + // The logic below is semantically equivalent to: + // enc.WriteToken(String(f.name)) + // but specialized and simplified because: + // 1. The Encoder must be expecting an object name. + // 2. The object namespace is guaranteed to be disabled. + // 3. The object name is guaranteed to be valid and pre-escaped. + // 4. There is no need to flush the buffer (for unwrite purposes). + // 5. There is no possibility of an error occurring. + if optimizeCommon { + // Append any delimiters or optional whitespace. + if enc.tokens.last.length() > 0 { + enc.buf = append(enc.buf, ',') + } + if enc.options.multiline { + enc.buf = enc.appendIndent(enc.buf, enc.tokens.needIndent('"')) + } + + // Append the token to the output and to the state machine. + n0 := len(enc.buf) // offset before calling appendString + if enc.options.EscapeRune == nil { + enc.buf = append(enc.buf, f.quotedName...) 
+ } else { + enc.buf, _ = appendString(enc.buf, f.name, false, enc.options.EscapeRune) + } + if !enc.options.AllowDuplicateNames { + enc.names.replaceLastQuotedOffset(n0) + } + enc.tokens.last.increment() + } else { + if err := enc.WriteToken(String(f.name)); err != nil { + return err + } + } + + // Write the object member value. + mo2 := mo + if f.string { + mo2.StringifyNumbers = true + } + if f.format != "" { + mo2.formatDepth = enc.tokens.depth() + mo2.format = f.format + } + if err := marshal(mo2, enc, v); err != nil { + return err + } + + // Try unwriting the member if empty (slow path for omitempty). + if f.omitempty { + var prevName *string + if prevIdx >= 0 { + prevName = &fields.flattened[prevIdx].name + } + if enc.unwriteEmptyObjectMember(prevName) { + continue + } + } + + // Remember the previous written object member. + // The set of seen fields only needs to be updated to detect + // duplicate names with those from the inlined fallback. + if !enc.options.AllowDuplicateNames && fields.inlinedFallback != nil { + seenIdxs.insert(uint(f.id)) + } + prevIdx = f.id + } + if fields.inlinedFallback != nil && !(mo.DiscardUnknownMembers && fields.inlinedFallback.unknown) { + var insertUnquotedName func([]byte) bool + if !enc.options.AllowDuplicateNames { + insertUnquotedName = func(name []byte) bool { + // Check that the name from inlined fallback does not match + // one of the previously marshaled names from known fields. + if foldedFields := fields.byFoldedName[string(foldName(name))]; len(foldedFields) > 0 { + if f := fields.byActualName[string(name)]; f != nil { + return seenIdxs.insert(uint(f.id)) + } + for _, f := range foldedFields { + if f.nocase { + return seenIdxs.insert(uint(f.id)) + } + } + } + + // Check that the name does not match any other name + // previously marshaled from the inlined fallback. + return enc.namespaces.last().insertUnquoted(name) + } + } + if err := marshalInlinedFallbackAll(mo, enc, va, fields.inlinedFallback, insertUnquotedName); err != nil { + return err + } + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '{': + once.Do(init) + if errInit != nil { + err := *errInit // shallow copy SemanticError + err.action = "unmarshal" + return &err + } + var seenIdxs uintSet + dec.tokens.last.disableNamespace() + for dec.PeekKind() != '}' { + // Process the object member name. + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + name := unescapeStringMayCopy(val, flags.isVerbatim()) + f := fields.byActualName[string(name)] + if f == nil { + for _, f2 := range fields.byFoldedName[string(foldName(name))] { + if f2.nocase { + f = f2 + break + } + } + if f == nil { + if uo.RejectUnknownMembers && (fields.inlinedFallback == nil || fields.inlinedFallback.unknown) { + return &SemanticError{action: "unmarshal", GoType: t, Err: fmt.Errorf("unknown name %s", val)} + } + if !dec.options.AllowDuplicateNames && !dec.namespaces.last().insertUnquoted(name) { + // TODO: Unread the object name. 
+ err := &SyntacticError{str: "duplicate name " + string(val) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(val))) + } + + if fields.inlinedFallback == nil { + // Skip unknown value since we have no place to store it. + if err := dec.SkipValue(); err != nil { + return err + } + } else { + // Marshal into value capable of storing arbitrary object members. + if err := unmarshalInlinedFallbackNext(uo, dec, va, fields.inlinedFallback, val, name); err != nil { + return err + } + } + continue + } + } + if !dec.options.AllowDuplicateNames && !seenIdxs.insert(uint(f.id)) { + // TODO: Unread the object name. + err := &SyntacticError{str: "duplicate name " + string(val) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(val))) + } + + // Process the object member value. + unmarshal := f.fncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, f.typ) + } + uo2 := uo + if f.string { + uo2.StringifyNumbers = true + } + if f.format != "" { + uo2.formatDepth = dec.tokens.depth() + uo2.format = f.format + } + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], true) + } + if err := unmarshal(uo2, dec, v); err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func (va addressableValue) fieldByIndex(index []int, mayAlloc bool) addressableValue { + for _, i := range index { + va = va.indirect(mayAlloc) + if !va.IsValid() { + return va + } + va = addressableValue{va.Field(i)} // addressable if struct value is addressable + } + return va +} + +func (va addressableValue) indirect(mayAlloc bool) addressableValue { + if va.Kind() == reflect.Pointer { + if va.IsNil() { + if !mayAlloc { + return addressableValue{} + } + va.Set(reflect.New(va.Type().Elem())) + } + va = addressableValue{va.Elem()} // dereferenced pointer is always addressable + } + return va +} + +func makeSliceArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "emitnull" { + if va.IsNil() { + return enc.WriteToken(Null) + } + mo.format = "" + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + // Optimize for marshaling an empty slice without any preceding whitespace. + n := va.Len() + if optimizeCommon && n == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '[') + enc.buf = append(enc.buf, "[]"...) 
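
The struct arshaler above drives several per-field options (omitzero, omitempty, string, format, and an optional inlined fallback for unknown members). Below is a minimal sketch of a struct exercising them; the `json` struct-tag spellings are assumed to follow the upstream go-json-experiment/json package, since the tag parser is not part of this file.

package example

import "time"

// Assumed tag spellings (`omitzero`, `omitempty`, `string`, `format:...`,
// and `unknown` for the inlined fallback) follow the upstream
// go-json-experiment/json package; the tag parser is outside this file.
type Server struct {
	Name string `json:"name"` // always emitted

	// omitzero: skipped when the Go value is the zero value
	// (the f.omitzero fast path in the marshal loop above).
	Port int `json:"port,omitzero"`

	// omitempty: skipped when the marshaled JSON value is empty,
	// possibly via the unwriteEmptyObjectMember slow path.
	Tags []string `json:"tags,omitempty"`

	// string: quotes numbers by setting StringifyNumbers for this field.
	MaxConns int64 `json:"maxConns,string"`

	// format: forwarded to the field's arshaler via mo.format / uo.format.
	Started time.Time `json:"started,format:RFC3339"`

	// Inlined fallback: absorbs members that match no named field;
	// at most one such field is allowed per struct.
	Extra map[string]any `json:",unknown"`
}
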
+ enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + once.Do(init) + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + for i := 0; i < n; i++ { + v := addressableValue{va.Index(i)} // indexed slice element is always addressable + if err := marshal(mo, enc, v); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil + } + emptySlice := reflect.MakeSlice(t, 0, 0) + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "emitnull" { + uo.format = "" // only relevant for marshaling + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '[': + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + mustZero := true // we do not know the cleanliness of unused capacity + cap := va.Cap() + if cap > 0 { + va.SetLen(cap) + } + var i int + for dec.PeekKind() != ']' { + if i == cap { + // TODO(https://go.dev/issue/48000): Use reflect.Value.Append. + va.Set(reflect.Append(va.Value, reflect.Zero(t.Elem()))) + cap = va.Cap() + va.SetLen(cap) + mustZero = false // append guarantees that unused capacity is zero-initialized + } + v := addressableValue{va.Index(i)} // indexed slice element is always addressable + i++ + if mustZero { + v.Set(reflect.Zero(t.Elem())) + } + if err := unmarshal(uo, dec, v); err != nil { + va.SetLen(i) + return err + } + } + if i == 0 { + va.Set(emptySlice) + } else { + va.SetLen(i) + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeArrayArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + n := t.Len() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + once.Do(init) + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + for i := 0; i < n; i++ { + v := addressableValue{va.Index(i)} // indexed array element is addressable if array is addressable + if err := marshal(mo, enc, v); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '[': + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + var i int + for dec.PeekKind() != ']' { + 
if i >= n { + err := errors.New("too many array elements") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + v := addressableValue{va.Index(i)} // indexed array element is addressable if array is addressable + v.Set(reflect.Zero(v.Type())) + if err := unmarshal(uo, dec, v); err != nil { + return err + } + i++ + } + if _, err := dec.ReadToken(); err != nil { + return err + } + if i < n { + err := errors.New("too few array elements") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makePointerArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + // NOTE: MarshalOptions.format is forwarded to underlying marshal. + if va.IsNil() { + return enc.WriteToken(Null) + } + once.Do(init) + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + v := addressableValue{va.Elem()} // dereferenced pointer is always addressable + return marshal(mo, enc, v) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + // NOTE: UnmarshalOptions.format is forwarded to underlying unmarshal. + if dec.PeekKind() == 'n' { + if _, err := dec.ReadToken(); err != nil { + return err + } + va.Set(reflect.Zero(t)) + return nil + } + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + if va.IsNil() { + va.Set(reflect.New(t.Elem())) + } + v := addressableValue{va.Elem()} // dereferenced pointer is always addressable + return unmarshal(uo, dec, v) + } + return &fncs +} + +func makeInterfaceArshaler(t reflect.Type) *arshaler { + // NOTE: Values retrieved from an interface are not addressable, + // so we shallow copy the values to make them addressable and + // store them back into the interface afterwards. + + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + if va.IsNil() { + return enc.WriteToken(Null) + } + v := newAddressableValue(va.Elem().Type()) + v.Set(va.Elem()) + marshal := lookupArshaler(v.Type()).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, v.Type()) + } + // Optimize for the any type if there are no special options. 
+ if optimizeCommon && t == anyType && !mo.StringifyNumbers && mo.format == "" && (mo.Marshalers == nil || !mo.Marshalers.fromAny) { + return marshalValueAny(mo, enc, va.Elem().Interface()) + } + return marshal(mo, enc, v) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + if dec.PeekKind() == 'n' { + if _, err := dec.ReadToken(); err != nil { + return err + } + va.Set(reflect.Zero(t)) + return nil + } + var v addressableValue + if va.IsNil() { + // Optimize for the any type if there are no special options. + // We do not care about stringified numbers since JSON strings + // are always unmarshaled into an any value as Go strings. + // Duplicate name check must be enforced since unmarshalValueAny + // does not implement merge semantics. + if optimizeCommon && t == anyType && uo.format == "" && (uo.Unmarshalers == nil || !uo.Unmarshalers.fromAny) && !dec.options.AllowDuplicateNames { + v, err := unmarshalValueAny(uo, dec) + // We must check for nil interface values up front. + // See https://go.dev/issue/52310. + if v != nil { + va.Set(reflect.ValueOf(v)) + } + return err + } + + k := dec.PeekKind() + if !isAnyType(t) { + err := errors.New("cannot derive concrete type for non-empty interface") + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + switch k { + case 'f', 't': + v = newAddressableValue(boolType) + case '"': + v = newAddressableValue(stringType) + case '0': + v = newAddressableValue(float64Type) + case '{': + v = newAddressableValue(mapStringAnyType) + case '[': + v = newAddressableValue(sliceAnyType) + default: + // If k is invalid (e.g., due to an I/O or syntax error), then + // that will be cached by PeekKind and returned by ReadValue. + // If k is '}' or ']', then ReadValue must error since + // those are invalid kinds at the start of a JSON value. + _, err := dec.ReadValue() + return err + } + } else { + // Shallow copy the existing value to keep it addressable. + // Any mutations at the top-level of the value will be observable + // since we always store this value back into the interface value. + v = newAddressableValue(va.Elem().Type()) + v.Set(va.Elem()) + } + unmarshal := lookupArshaler(v.Type()).unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, v.Type()) + } + err := unmarshal(uo, dec, v) + va.Set(v.Value) + return err + } + return &fncs +} + +// isAnyType reports wether t is equivalent to the any interface type. +func isAnyType(t reflect.Type) bool { + // This is forward compatible if the Go language permits type sets within + // ordinary interfaces where an interface with zero methods does not + // necessarily mean it can hold every possible Go type. + // See https://go.dev/issue/45346. 
+ return t == anyType || anyType.Implements(t) +} + +func makeInvalidArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + return &SemanticError{action: "marshal", GoType: t} + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + return &SemanticError{action: "unmarshal", GoType: t} + } + return &fncs +} + +func newInvalidFormatError(action string, t reflect.Type, format string) error { + err := fmt.Errorf("invalid format flag: %q", format) + return &SemanticError{action: action, GoType: t, Err: err} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go new file mode 100644 index 000000000..8a4e70083 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go @@ -0,0 +1,387 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// SkipFunc may be returned by MarshalFuncV2 and UnmarshalFuncV2 functions. +// +// Any function that returns SkipFunc must not cause observable side effects +// on the provided Encoder or Decoder. For example, it is permissible to call +// Decoder.PeekKind, but not permissible to call Decoder.ReadToken or +// Encoder.WriteToken since such methods mutate the state. +const SkipFunc = jsonError("skip function") + +// Marshalers is a list of functions that may override the marshal behavior +// of specific types. Populate MarshalOptions.Marshalers to use it. +// A nil *Marshalers is equivalent to an empty list. +type Marshalers = typedMarshalers + +// NewMarshalers constructs a flattened list of marshal functions. +// If multiple functions in the list are applicable for a value of a given type, +// then those earlier in the list take precedence over those that come later. +// If a function returns SkipFunc, then the next applicable function is called, +// otherwise the default marshaling behavior is used. +// +// For example: +// +// m1 := NewMarshalers(f1, f2) +// m2 := NewMarshalers(f0, m1, f3) // equivalent to m3 +// m3 := NewMarshalers(f0, f1, f2, f3) // equivalent to m2 +func NewMarshalers(ms ...*Marshalers) *Marshalers { + return newMarshalers(ms...) +} + +// Unmarshalers is a list of functions that may override the unmarshal behavior +// of specific types. Populate UnmarshalOptions.Unmarshalers to use it. +// A nil *Unmarshalers is equivalent to an empty list. +type Unmarshalers = typedUnmarshalers + +// NewUnmarshalers constructs a flattened list of unmarshal functions. +// If multiple functions in the list are applicable for a value of a given type, +// then those earlier in the list take precedence over those that come later. +// If a function returns SkipFunc, then the next applicable function is called, +// otherwise the default unmarshaling behavior is used. +// +// For example: +// +// u1 := NewUnmarshalers(f1, f2) +// u2 := NewUnmarshalers(f0, u1, f3) // equivalent to u3 +// u3 := NewUnmarshalers(f0, f1, f2, f3) // equivalent to u2 +func NewUnmarshalers(us ...*Unmarshalers) *Unmarshalers { + return newUnmarshalers(us...) 
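
A minimal sketch of composing these lists follows. It assumes the upstream package is importable as github.com/go-json-experiment/json and exposes MarshalOptions.Marshal(EncodeOptions, any) as an entry point; neither assumption is established by this file. The V2 function defers to the V1 function via SkipFunc.

package example

import (
	"fmt"

	// Assumed import path for the upstream package; the copy added in
	// this diff is vendored under an internal, non-importable path.
	json "github.com/go-json-experiment/json"
)

type Celsius float64

func marshalTemperature(v any) ([]byte, error) {
	marshalers := json.NewMarshalers(
		// V2: streams through the Encoder and may return SkipFunc,
		// in which case the next applicable function (the V1 below) runs.
		json.MarshalFuncV2(func(mo json.MarshalOptions, enc *json.Encoder, c Celsius) error {
			if c < -273.15 {
				return json.SkipFunc
			}
			return enc.WriteToken(json.String(fmt.Sprintf("%v C", float64(c))))
		}),
		// V1: produces one complete JSON value in a single call.
		json.MarshalFuncV1(func(c Celsius) ([]byte, error) {
			return []byte(`"below absolute zero"`), nil
		}),
	)
	mo := json.MarshalOptions{Marshalers: marshalers}
	// Entry point assumed from the upstream package, not shown in this file.
	return mo.Marshal(json.EncodeOptions{}, v)
}
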
+} + +type typedMarshalers = typedArshalers[MarshalOptions, Encoder] +type typedUnmarshalers = typedArshalers[UnmarshalOptions, Decoder] +type typedArshalers[Options, Coder any] struct { + nonComparable + + fncVals []typedArshaler[Options, Coder] + fncCache sync.Map // map[reflect.Type]arshaler + + // fromAny reports whether any of Go types used to represent arbitrary JSON + // (i.e., any, bool, string, float64, map[string]any, or []any) matches + // any of the provided type-specific arshalers. + // + // This bit of information is needed in arshal_default.go to determine + // whether to use the specialized logic in arshal_any.go to handle + // the any interface type. The logic in arshal_any.go does not support + // type-specific arshal functions, so we must avoid using that logic + // if this is true. + fromAny bool +} +type typedMarshaler = typedArshaler[MarshalOptions, Encoder] +type typedUnmarshaler = typedArshaler[UnmarshalOptions, Decoder] +type typedArshaler[Options, Coder any] struct { + typ reflect.Type + fnc func(Options, *Coder, addressableValue) error + maySkip bool +} + +func newMarshalers(ms ...*Marshalers) *Marshalers { return newTypedArshalers(ms...) } +func newUnmarshalers(us ...*Unmarshalers) *Unmarshalers { return newTypedArshalers(us...) } +func newTypedArshalers[Options, Coder any](as ...*typedArshalers[Options, Coder]) *typedArshalers[Options, Coder] { + var a typedArshalers[Options, Coder] + for _, a2 := range as { + if a2 != nil { + a.fncVals = append(a.fncVals, a2.fncVals...) + a.fromAny = a.fromAny || a2.fromAny + } + } + if len(a.fncVals) == 0 { + return nil + } + return &a +} + +func (a *typedArshalers[Options, Coder]) lookup(fnc func(Options, *Coder, addressableValue) error, t reflect.Type) (func(Options, *Coder, addressableValue) error, bool) { + if a == nil { + return fnc, false + } + if v, ok := a.fncCache.Load(t); ok { + if v == nil { + return fnc, false + } + return v.(func(Options, *Coder, addressableValue) error), true + } + + // Collect a list of arshalers that can be called for this type. + // This list may be longer than 1 since some arshalers can be skipped. + var fncs []func(Options, *Coder, addressableValue) error + for _, fncVal := range a.fncVals { + if !castableTo(t, fncVal.typ) { + continue + } + fncs = append(fncs, fncVal.fnc) + if !fncVal.maySkip { + break // subsequent arshalers will never be called + } + } + + if len(fncs) == 0 { + a.fncCache.Store(t, nil) // nil to indicate that no funcs found + return fnc, false + } + + // Construct an arshaler that may call every applicable arshaler. + fncDefault := fnc + fnc = func(o Options, c *Coder, v addressableValue) error { + for _, fnc := range fncs { + if err := fnc(o, c, v); err != SkipFunc { + return err // may be nil or non-nil + } + } + return fncDefault(o, c, v) + } + + // Use the first stored so duplicate work can be garbage collected. + v, _ := a.fncCache.LoadOrStore(t, fnc) + return v.(func(Options, *Coder, addressableValue) error), true +} + +// MarshalFuncV1 constructs a type-specific marshaler that +// specifies how to marshal values of type T. +// T can be any type except a named pointer. +// The function is always provided with a non-nil pointer value +// if T is an interface or pointer type. +// +// The function must marshal exactly one JSON value. +// The value of T must not be retained outside the function call. +// It may not return SkipFunc. 
+func MarshalFuncV1[T any](fn func(T) ([]byte, error)) *Marshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, true) + typFnc := typedMarshaler{ + typ: t, + fnc: func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + val, err := fn(va.castTo(t).Interface().(T)) + if err != nil { + err = wrapSkipFunc(err, "marshal function of type func(T) ([]byte, error)") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: RawValue(val).Kind(), GoType: t, Err: err} + } + return nil + }, + } + return &Marshalers{fncVals: []typedMarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// MarshalFuncV2 constructs a type-specific marshaler that +// specifies how to marshal values of type T. +// T can be any type except a named pointer. +// The function is always provided with a non-nil pointer value +// if T is an interface or pointer type. +// +// The function must marshal exactly one JSON value by calling write methods +// on the provided encoder. It may return SkipFunc such that marshaling can +// move on to the next marshal function. However, no mutable method calls may +// be called on the encoder if SkipFunc is returned. +// The pointer to Encoder and the value of T must not be retained +// outside the function call. +func MarshalFuncV2[T any](fn func(MarshalOptions, *Encoder, T) error) *Marshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, true) + typFnc := typedMarshaler{ + typ: t, + fnc: func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + prevDepth, prevLength := enc.tokens.depthLength() + err := fn(mo, enc, va.castTo(t).Interface().(T)) + currDepth, currLength := enc.tokens.depthLength() + if err == nil && (prevDepth != currDepth || prevLength+1 != currLength) { + err = errors.New("must write exactly one JSON value") + } + if err != nil { + if err == SkipFunc { + if prevDepth == currDepth && prevLength == currLength { + return SkipFunc + } + err = errors.New("must not write any JSON tokens when skipping") + } + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return nil + }, + maySkip: true, + } + return &Marshalers{fncVals: []typedMarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// UnmarshalFuncV1 constructs a type-specific unmarshaler that +// specifies how to unmarshal values of type T. +// T must be an unnamed pointer or an interface type. +// The function is always provided with a non-nil pointer value. +// +// The function must unmarshal exactly one JSON value. +// The input []byte must not be mutated. +// The input []byte and value T must not be retained outside the function call. +// It may not return SkipFunc. +func UnmarshalFuncV1[T any](fn func([]byte, T) error) *Unmarshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, false) + typFnc := typedUnmarshaler{ + typ: t, + fnc: func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + val, err := dec.ReadValue() + if err != nil { + return err // must be a syntactic or I/O error + } + err = fn(val, va.castTo(t).Interface().(T)) + if err != nil { + err = wrapSkipFunc(err, "unmarshal function of type func([]byte, T) error") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. 
+ return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + }, + } + return &Unmarshalers{fncVals: []typedUnmarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// UnmarshalFuncV2 constructs a type-specific unmarshaler that +// specifies how to unmarshal values of type T. +// T must be an unnamed pointer or an interface type. +// The function is always provided with a non-nil pointer value. +// +// The function must unmarshal exactly one JSON value by calling read methods +// on the provided decoder. It may return SkipFunc such that unmarshaling can +// move on to the next unmarshal function. However, no mutable method calls may +// be called on the decoder if SkipFunc is returned. +// The pointer to Decoder and the value of T must not be retained +// outside the function call. +func UnmarshalFuncV2[T any](fn func(UnmarshalOptions, *Decoder, T) error) *Unmarshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, false) + typFnc := typedUnmarshaler{ + typ: t, + fnc: func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + prevDepth, prevLength := dec.tokens.depthLength() + err := fn(uo, dec, va.castTo(t).Interface().(T)) + currDepth, currLength := dec.tokens.depthLength() + if err == nil && (prevDepth != currDepth || prevLength+1 != currLength) { + err = errors.New("must read exactly one JSON value") + } + if err != nil { + if err == SkipFunc { + if prevDepth == currDepth && prevLength == currLength { + return SkipFunc + } + err = errors.New("must not read any JSON tokens when skipping") + } + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + }, + maySkip: true, + } + return &Unmarshalers{fncVals: []typedUnmarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// assertCastableTo asserts that "to" is a valid type to be casted to. +// These are the Go types that type-specific arshalers may operate upon. +// +// Let AllTypes be the universal set of all possible Go types. +// This function generally asserts that: +// +// len([from for from in AllTypes if castableTo(from, to)]) > 0 +// +// otherwise it panics. +// +// As a special-case if marshal is false, then we forbid any non-pointer or +// non-interface type since it is almost always a bug trying to unmarshal +// into something where the end-user caller did not pass in an addressable value +// since they will not observe the mutations. +func assertCastableTo(to reflect.Type, marshal bool) { + switch to.Kind() { + case reflect.Interface: + return + case reflect.Pointer: + // Only allow unnamed pointers to be consistent with the fact that + // taking the address of a value produces an unnamed pointer type. + if to.Name() == "" { + return + } + default: + // Technically, non-pointer types are permissible for unmarshal. + // However, they are often a bug since the receiver would be immutable. + // Thus, only allow them for marshaling. + if marshal { + return + } + } + if marshal { + panic(fmt.Sprintf("input type %v must be an interface type, an unnamed pointer type, or a non-pointer type", to)) + } else { + panic(fmt.Sprintf("input type %v must be an interface type or an unnamed pointer type", to)) + } +} + +// castableTo checks whether values of type "from" can be casted to type "to". +// Nil pointer or interface "from" values are never considered castable. +// +// This function must be kept in sync with addressableValue.castTo. 
+func castableTo(from, to reflect.Type) bool { + switch to.Kind() { + case reflect.Interface: + // TODO: This breaks when ordinary interfaces can have type sets + // since interfaces now exist where only the value form of a type (T) + // implements the interface, but not the pointer variant (*T). + // See https://go.dev/issue/45346. + return reflect.PointerTo(from).Implements(to) + case reflect.Pointer: + // Common case for unmarshaling. + // From must be a concrete or interface type. + return reflect.PointerTo(from) == to + default: + // Common case for marshaling. + // From must be a concrete type. + return from == to + } +} + +// castTo casts va to the specified type. +// If the type is an interface, then the underlying type will always +// be a non-nil pointer to a concrete type. +// +// Requirement: castableTo(va.Type(), to) must hold. +func (va addressableValue) castTo(to reflect.Type) reflect.Value { + switch to.Kind() { + case reflect.Interface: + return va.Addr().Convert(to) + case reflect.Pointer: + return va.Addr() + default: + return va.Value + } +} + +// castableToFromAny reports whether "to" can be casted to from any +// of the dynamic types used to represent arbitrary JSON. +func castableToFromAny(to reflect.Type) bool { + for _, from := range []reflect.Type{anyType, boolType, stringType, float64Type, mapStringAnyType, sliceAnyType} { + if castableTo(from, to) { + return true + } + } + return false +} + +func wrapSkipFunc(err error, what string) error { + if err == SkipFunc { + return errors.New(what + " cannot be skipped") + } + return err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go new file mode 100644 index 000000000..258a98247 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go @@ -0,0 +1,213 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "reflect" +) + +// This package supports "inlining" a Go struct field, where the contents +// of the serialized field (which must be a JSON object) are treated as if +// they are part of the parent Go struct (which represents a JSON object). +// +// Generally, inlined fields are of a Go struct type, where the fields of the +// nested struct are virtually hoisted up to the parent struct using rules +// similar to how Go embedding works (but operating within the JSON namespace). +// +// However, inlined fields may also be of a Go map type with a string key +// or a RawValue. Such inlined fields are called "fallback" fields since they +// represent any arbitrary JSON object member. Explicitly named fields take +// precedence over the inlined fallback. Only one inlined fallback is allowed. + +var rawValueType = reflect.TypeOf((*RawValue)(nil)).Elem() + +// marshalInlinedFallbackAll marshals all the members in an inlined fallback. 
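
A minimal sketch of such a fallback field follows, assuming the upstream package's ",inline" tag spelling and import path (neither appears in this file): one RawValue or string-keyed map field catches every member that no named field claims, and named fields take precedence over it.

package example

import (
	// Assumed import path; the copy added in this diff is vendored
	// under an internal, non-importable path.
	json "github.com/go-json-experiment/json"
)

type Manifest struct {
	APIVersion string `json:"apiVersion"`

	// Inlined fallback: members other than "apiVersion" are captured
	// here on unmarshal and written back out on marshal; the captured
	// value must itself form a JSON object, as enforced above.
	Rest json.RawValue `json:",inline"`
}
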
+func marshalInlinedFallbackAll(mo MarshalOptions, enc *Encoder, va addressableValue, f *structField, insertUnquotedName func([]byte) bool) error { + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], false) + if !v.IsValid() { + return nil // implies a nil inlined field + } + } + v = v.indirect(false) + if !v.IsValid() { + return nil + } + + if v.Type() == rawValueType { + b := v.Interface().(RawValue) + if len(b) == 0 { // TODO: Should this be nil? What if it were all whitespace? + return nil + } + + dec := getBufferedDecoder(b, DecodeOptions{AllowDuplicateNames: true, AllowInvalidUTF8: true}) + defer putBufferedDecoder(dec) + + tok, err := dec.ReadToken() + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if tok.Kind() != '{' { + err := errors.New("inlined raw value must be a JSON object") + return &SemanticError{action: "marshal", JSONKind: tok.Kind(), GoType: rawValueType, Err: err} + } + for dec.PeekKind() != '}' { + // Parse the JSON object name. + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if insertUnquotedName != nil { + name := unescapeStringMayCopy(val, flags.isVerbatim()) + if !insertUnquotedName(name) { + return &SyntacticError{str: "duplicate name " + string(val) + " in object"} + } + } + if err := enc.WriteValue(val); err != nil { + return err + } + + // Parse the JSON object value. + val, err = dec.readValue(&flags) + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if err := enc.WriteValue(val); err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if err := dec.checkEOF(); err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + return nil + } else { + m := v // must be a map[string]V + n := m.Len() + if n == 0 { + return nil + } + mk := newAddressableValue(stringType) + mv := newAddressableValue(m.Type().Elem()) + marshalKey := func(mk addressableValue) error { + b, err := appendString(enc.UnusedBuffer(), mk.String(), !enc.options.AllowInvalidUTF8, nil) + if err != nil { + return err + } + if insertUnquotedName != nil { + isVerbatim := bytes.IndexByte(b, '\\') < 0 + name := unescapeStringMayCopy(b, isVerbatim) + if !insertUnquotedName(name) { + return &SyntacticError{str: "duplicate name " + string(b) + " in object"} + } + } + return enc.WriteValue(b) + } + marshalVal := f.fncs.marshal + if mo.Marshalers != nil { + marshalVal, _ = mo.Marshalers.lookup(marshalVal, mv.Type()) + } + if !mo.Deterministic || n <= 1 { + for iter := m.MapRange(); iter.Next(); { + mk.SetIterKey(iter) + if err := marshalKey(mk); err != nil { + return err + } + mv.Set(iter.Value()) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } + } + } else { + names := getStrings(n) + for i, iter := 0, m.Value.MapRange(); i < n && iter.Next(); i++ { + mk.SetIterKey(iter) + (*names)[i] = mk.String() + } + names.Sort() + for _, name := range *names { + mk.SetString(name) + if err := marshalKey(mk); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use mv.SetMapIndexOf. 
+ mv.Set(m.MapIndex(mk.Value)) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } + } + putStrings(names) + } + return nil + } +} + +// unmarshalInlinedFallbackNext unmarshals only the next member in an inlined fallback. +func unmarshalInlinedFallbackNext(uo UnmarshalOptions, dec *Decoder, va addressableValue, f *structField, quotedName, unquotedName []byte) error { + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], true) + } + v = v.indirect(true) + + if v.Type() == rawValueType { + b := v.Addr().Interface().(*RawValue) + if len(*b) == 0 { // TODO: Should this be nil? What if it were all whitespace? + *b = append(*b, '{') + } else { + *b = trimSuffixWhitespace(*b) + if hasSuffixByte(*b, '}') { + // TODO: When merging into an object for the first time, + // should we verify that it is valid? + *b = trimSuffixByte(*b, '}') + *b = trimSuffixWhitespace(*b) + if !hasSuffixByte(*b, ',') && !hasSuffixByte(*b, '{') { + *b = append(*b, ',') + } + } else { + err := errors.New("inlined raw value must be a JSON object") + return &SemanticError{action: "unmarshal", GoType: rawValueType, Err: err} + } + } + *b = append(*b, quotedName...) + *b = append(*b, ':') + rawValue, err := dec.ReadValue() + if err != nil { + return err + } + *b = append(*b, rawValue...) + *b = append(*b, '}') + return nil + } else { + name := string(unquotedName) // TODO: Intern this? + + m := v // must be a map[string]V + if m.IsNil() { + m.Set(reflect.MakeMap(m.Type())) + } + mk := reflect.ValueOf(name) + mv := newAddressableValue(v.Type().Elem()) // TODO: Cache across calls? + if v2 := m.MapIndex(mk); v2.IsValid() { + mv.Set(v2) + } + + unmarshal := f.fncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, mv.Type()) + } + err := unmarshal(uo, dec, mv) + m.SetMapIndex(mk, mv.Value) + if err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go new file mode 100644 index 000000000..20899c868 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go @@ -0,0 +1,229 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding" + "errors" + "reflect" +) + +// Interfaces for custom serialization. +var ( + jsonMarshalerV1Type = reflect.TypeOf((*MarshalerV1)(nil)).Elem() + jsonMarshalerV2Type = reflect.TypeOf((*MarshalerV2)(nil)).Elem() + jsonUnmarshalerV1Type = reflect.TypeOf((*UnmarshalerV1)(nil)).Elem() + jsonUnmarshalerV2Type = reflect.TypeOf((*UnmarshalerV2)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// MarshalerV1 is implemented by types that can marshal themselves. +// It is recommended that types implement MarshalerV2 unless the implementation +// is trying to avoid a hard dependency on the "jsontext" package. +// +// It is recommended that implementations return a buffer that is safe +// for the caller to retain and potentially mutate. 
+type MarshalerV1 interface { + MarshalJSON() ([]byte, error) +} + +// MarshalerV2 is implemented by types that can marshal themselves. +// It is recommended that types implement MarshalerV2 instead of MarshalerV1 +// since this is both more performant and flexible. +// If a type implements both MarshalerV1 and MarshalerV2, +// then MarshalerV2 takes precedence. In such a case, both implementations +// should aim to have equivalent behavior for the default marshal options. +// +// The implementation must write only one JSON value to the Encoder and +// must not retain the pointer to Encoder. +type MarshalerV2 interface { + MarshalNextJSON(MarshalOptions, *Encoder) error + + // TODO: Should users call the MarshalOptions.MarshalNext method or + // should/can they call this method directly? Does it matter? +} + +// UnmarshalerV1 is implemented by types that can unmarshal themselves. +// It is recommended that types implement UnmarshalerV2 unless +// the implementation is trying to avoid a hard dependency on this package. +// +// The input can be assumed to be a valid encoding of a JSON value +// if called from unmarshal functionality in this package. +// UnmarshalJSON must copy the JSON data if it is retained after returning. +// It is recommended that UnmarshalJSON implement merge semantics when +// unmarshaling into a pre-populated value. +// +// Implementations must not retain or mutate the input []byte. +type UnmarshalerV1 interface { + UnmarshalJSON([]byte) error +} + +// UnmarshalerV2 is implemented by types that can unmarshal themselves. +// It is recommended that types implement UnmarshalerV2 instead of UnmarshalerV1 +// since this is both more performant and flexible. +// If a type implements both UnmarshalerV1 and UnmarshalerV2, +// then UnmarshalerV2 takes precedence. In such a case, both implementations +// should aim to have equivalent behavior for the default unmarshal options. +// +// The implementation must read only one JSON value from the Decoder. +// It is recommended that UnmarshalNextJSON implement merge semantics when +// unmarshaling into a pre-populated value. +// +// Implementations must not retain the pointer to Decoder. +type UnmarshalerV2 interface { + UnmarshalNextJSON(UnmarshalOptions, *Decoder) error + + // TODO: Should users call the UnmarshalOptions.UnmarshalNext method or + // should/can they call this method directly? Does it matter? +} + +func makeMethodArshaler(fncs *arshaler, t reflect.Type) *arshaler { + // Avoid injecting method arshaler on the pointer or interface version + // to avoid ever calling the method on a nil pointer or interface receiver. + // Let it be injected on the value receiver (which is always addressable). + if t.Kind() == reflect.Pointer || t.Kind() == reflect.Interface { + return fncs + } + + // Handle custom marshaler. + switch which, needAddr := implementsWhich(t, jsonMarshalerV2Type, jsonMarshalerV1Type, textMarshalerType); which { + case jsonMarshalerV2Type: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + prevDepth, prevLength := enc.tokens.depthLength() + err := va.addrWhen(needAddr).Interface().(MarshalerV2).MarshalNextJSON(mo, enc) + currDepth, currLength := enc.tokens.depthLength() + if (prevDepth != currDepth || prevLength+1 != currLength) && err == nil { + err = errors.New("must write exactly one JSON value") + } + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic or I/O errors. 
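
A minimal sketch of a type implementing the streaming V2 interfaces above, assuming the upstream import path: each method handles exactly one JSON value and does not retain the Encoder or Decoder beyond the call.

package example

import (
	"fmt"
	"strings"

	// Assumed import path; the copy added in this diff is vendored
	// under an internal, non-importable path.
	json "github.com/go-json-experiment/json"
)

type Point struct{ X, Y int }

// MarshalNextJSON writes exactly one JSON value: a string such as "3,4".
func (p Point) MarshalNextJSON(mo json.MarshalOptions, enc *json.Encoder) error {
	return enc.WriteToken(json.String(fmt.Sprintf("%d,%d", p.X, p.Y)))
}

// UnmarshalNextJSON reads exactly one JSON value and parses it.
func (p *Point) UnmarshalNextJSON(uo json.UnmarshalOptions, dec *json.Decoder) error {
	val, err := dec.ReadValue()
	if err != nil {
		return err
	}
	// Naive unquoting for the sketch; real code should check the kind
	// and handle escape sequences.
	_, err = fmt.Sscanf(strings.Trim(string(val), `"`), "%d,%d", &p.X, &p.Y)
	return err
}
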
+ return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return nil + } + case jsonMarshalerV1Type: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + marshaler := va.addrWhen(needAddr).Interface().(MarshalerV1) + val, err := marshaler.MarshalJSON() + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: RawValue(val).Kind(), GoType: t, Err: err} + } + return nil + } + case textMarshalerType: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + marshaler := va.addrWhen(needAddr).Interface().(encoding.TextMarshaler) + s, err := marshaler.MarshalText() + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + val := enc.UnusedBuffer() + val, err = appendString(val, string(s), true, nil) + if err != nil { + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping syntactic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + return nil + } + } + + // Handle custom unmarshaler. + switch which, needAddr := implementsWhich(t, jsonUnmarshalerV2Type, jsonUnmarshalerV1Type, textUnmarshalerType); which { + case jsonUnmarshalerV2Type: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + prevDepth, prevLength := dec.tokens.depthLength() + err := va.addrWhen(needAddr).Interface().(UnmarshalerV2).UnmarshalNextJSON(uo, dec) + currDepth, currLength := dec.tokens.depthLength() + if (prevDepth != currDepth || prevLength+1 != currLength) && err == nil { + err = errors.New("must read exactly one JSON value") + } + if err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + } + case jsonUnmarshalerV1Type: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + val, err := dec.ReadValue() + if err != nil { + return err // must be a syntactic or I/O error + } + unmarshaler := va.addrWhen(needAddr).Interface().(UnmarshalerV1) + if err := unmarshaler.UnmarshalJSON(val); err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. 
+ return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + } + case textUnmarshalerType: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err // must be a syntactic or I/O error + } + if val.Kind() != '"' { + err = errors.New("JSON value must be string type") + return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + s := unescapeStringMayCopy(val, flags.isVerbatim()) + unmarshaler := va.addrWhen(needAddr).Interface().(encoding.TextUnmarshaler) + if err := unmarshaler.UnmarshalText(s); err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + } + } + + return fncs +} + +// implementsWhich is like t.Implements(ifaceType) for a list of interfaces, +// but checks whether either t or reflect.PointerTo(t) implements the interface. +// It returns the first interface type that matches and whether a value of t +// needs to be addressed first before it implements the interface. +func implementsWhich(t reflect.Type, ifaceTypes ...reflect.Type) (which reflect.Type, needAddr bool) { + for _, ifaceType := range ifaceTypes { + switch { + case t.Implements(ifaceType): + return ifaceType, false + case reflect.PointerTo(t).Implements(ifaceType): + return ifaceType, true + } + } + return nil, false +} + +// addrWhen returns va.Addr if addr is specified, otherwise it returns itself. +func (va addressableValue) addrWhen(addr bool) reflect.Value { + if addr { + return va.Addr() + } + return va.Value +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go new file mode 100644 index 000000000..fc8d5b007 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go @@ -0,0 +1,241 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" +) + +var ( + timeDurationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + timeTimeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { + // Ideally, time types would implement MarshalerV2 and UnmarshalerV2, + // but that would incur a dependency on package json from package time. + // Given how widely used time is, it is more acceptable that we incur a + // dependency on time from json. + // + // Injecting the arshaling functionality like this will not be identical + // to actually declaring methods on the time types since embedding of the + // time types will not be able to forward this functionality. 
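
A minimal sketch of the effect on the two time types handled by the switch below, assuming the upstream struct-tag spelling for the format option (the tag parser is outside this file):

package example

import "time"

type Job struct {
	// Default: time.Duration encodes as its String() form, e.g. "1m30s".
	Timeout time.Duration `json:"timeout"`

	// "nanos": falls back to the underlying int64 nanosecond count.
	Elapsed time.Duration `json:"elapsed,format:nanos"`

	// Default: time.Time marshals as RFC 3339 (with nanoseconds) and
	// unmarshals as RFC 3339.
	Created time.Time `json:"created"`

	// Any exported layout constant from package time recognized by
	// checkTimeFormat may be named instead; arbitrary layouts also work.
	Modified time.Time `json:"modified,format:RFC1123"`
}
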
+ switch t { + case timeDurationType: + fncs.nonDefault = true + marshalNanos := fncs.marshal + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "nanos" { + mo.format = "" + return marshalNanos(mo, enc, va) + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + td := va.Interface().(time.Duration) + b := enc.UnusedBuffer() + b = append(b, '"') + b = append(b, td.String()...) // never contains special characters + b = append(b, '"') + return enc.WriteValue(b) + } + unmarshalNanos := fncs.unmarshal + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + // TODO: Should there be a flag that specifies that we can unmarshal + // from either form since there would be no ambiguity? + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "nanos" { + uo.format = "" + return unmarshalNanos(uo, dec, va) + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + + var flags valueFlags + td := va.Addr().Interface().(*time.Duration) + val, err := dec.readValue(&flags) + if err != nil { + return err + } + switch k := val.Kind(); k { + case 'n': + *td = time.Duration(0) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + td2, err := time.ParseDuration(string(val)) + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + *td = td2 + return nil + default: + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + } + case timeTimeType: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + format := time.RFC3339Nano + isRFC3339 := true + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + var err error + format, isRFC3339, err = checkTimeFormat(mo.format) + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + } + + tt := va.Interface().(time.Time) + b := enc.UnusedBuffer() + b = append(b, '"') + b = tt.AppendFormat(b, format) + b = append(b, '"') + if isRFC3339 { + // Not all Go timestamps can be represented as valid RFC 3339. + // Explicitly check for these edge cases. + // See https://go.dev/issue/4556 and https://go.dev/issue/54580. + var err error + switch b := b[len(`"`) : len(b)-len(`"`)]; { + case b[len("9999")] != '-': // year must be exactly 4 digits wide + err = errors.New("year outside of range [0,9999]") + case b[len(b)-1] != 'Z': + c := b[len(b)-len("Z07:00")] + if ('0' <= c && c <= '9') || parseDec2(b[len(b)-len("07:00"):]) >= 24 { + err = errors.New("timezone hour outside of range [0,23]") + } + } + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteValue(b) // RFC 3339 never needs JSON escaping + } + // The format may contain special characters that need escaping. + // Verify that the result is a valid JSON string (common case), + // otherwise escape the string correctly (slower case). 
+ if consumeSimpleString(b) != len(b) { + b, _ = appendString(nil, string(b[len(`"`):len(b)-len(`"`)]), true, nil) + } + return enc.WriteValue(b) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + format := time.RFC3339 + isRFC3339 := true + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + var err error + format, isRFC3339, err = checkTimeFormat(uo.format) + if err != nil { + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + } + + var flags valueFlags + tt := va.Addr().Interface().(*time.Time) + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + *tt = time.Time{} + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + tt2, err := time.Parse(format, string(val)) + if isRFC3339 && err == nil { + // TODO(https://go.dev/issue/54580): RFC 3339 specifies + // the exact grammar of a valid timestamp. However, + // the parsing functionality in "time" is too loose and + // incorrectly accepts invalid timestamps as valid. + // Remove these manual checks when "time" checks it for us. + newParseError := func(layout, value, layoutElem, valueElem, message string) error { + return &time.ParseError{Layout: layout, Value: value, LayoutElem: layoutElem, ValueElem: valueElem, Message: message} + } + switch { + case val[len("2006-01-02T")+1] == ':': // hour must be two digits + err = newParseError(format, string(val), "15", string(val[len("2006-01-02T"):][:1]), "") + case val[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + err = newParseError(format, string(val), ".", ",", "") + case val[len(val)-1] != 'Z': + switch { + case parseDec2(val[len(val)-len("07:00"):]) >= 24: // timezone hour must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone hour out of range") + case parseDec2(val[len(val)-len("00"):]) >= 60: // timezone minute must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone minute out of range") + } + } + } + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + *tt = tt2 + return nil + default: + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + } + } + return fncs +} + +func checkTimeFormat(format string) (string, bool, error) { + // We assume that an exported constant in the time package will + // always start with an uppercase ASCII letter. 
+ if len(format) > 0 && 'A' <= format[0] && format[0] <= 'Z' { + switch format { + case "ANSIC": + return time.ANSIC, false, nil + case "UnixDate": + return time.UnixDate, false, nil + case "RubyDate": + return time.RubyDate, false, nil + case "RFC822": + return time.RFC822, false, nil + case "RFC822Z": + return time.RFC822Z, false, nil + case "RFC850": + return time.RFC850, false, nil + case "RFC1123": + return time.RFC1123, false, nil + case "RFC1123Z": + return time.RFC1123Z, false, nil + case "RFC3339": + return time.RFC3339, true, nil + case "RFC3339Nano": + return time.RFC3339Nano, true, nil + case "Kitchen": + return time.Kitchen, false, nil + case "Stamp": + return time.Stamp, false, nil + case "StampMilli": + return time.StampMilli, false, nil + case "StampMicro": + return time.StampMicro, false, nil + case "StampNano": + return time.StampNano, false, nil + default: + // Reject any format that is an exported Go identifier in case + // new format constants are added to the time package. + if strings.TrimFunc(format, isLetterOrDigit) == "" { + return "", false, fmt.Errorf("undefined format layout: %v", format) + } + } + } + return format, false, nil +} + +// parseDec2 parses b as an unsigned, base-10, 2-digit number. +// It panics if len(b) < 2. The result is undefined if digits are not base-10. +func parseDec2(b []byte) byte { + return 10*(b[0]-'0') + (b[1] - '0') +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go new file mode 100644 index 000000000..0d68b3233 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go @@ -0,0 +1,1655 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "io" + "math" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +// NOTE: The logic for decoding is complicated by the fact that reading from +// an io.Reader into a temporary buffer means that the buffer may contain a +// truncated portion of some valid input, requiring the need to fetch more data. +// +// This file is structured in the following way: +// +// - consumeXXX functions parse an exact JSON token from a []byte. +// If the buffer appears truncated, then it returns io.ErrUnexpectedEOF. +// The consumeSimpleXXX functions are so named because they only handle +// a subset of the grammar for the JSON token being parsed. +// They do not handle the full grammar to keep these functions inlineable. +// +// - Decoder.consumeXXX methods parse the next JSON token from Decoder.buf, +// automatically fetching more input if necessary. These methods take +// a position relative to the start of Decoder.buf as an argument and +// return the end of the consumed JSON token as a position, +// also relative to the start of Decoder.buf. +// +// - In the event of an I/O errors or state machine violations, +// the implementation avoids mutating the state of Decoder +// (aside from the book-keeping needed to implement Decoder.fetch). +// For this reason, only Decoder.ReadToken and Decoder.ReadValue are +// responsible for updated Decoder.prevStart and Decoder.prevEnd. 
+// +// - For performance, much of the implementation uses the pattern of calling +// the inlineable consumeXXX functions first, and if more work is necessary, +// then it calls the slower Decoder.consumeXXX methods. +// TODO: Revisit this pattern if the Go compiler provides finer control +// over exactly which calls are inlined or not. + +// DecodeOptions configures how JSON decoding operates. +// The zero value is equivalent to the default settings, +// which is compliant with both RFC 7493 and RFC 8259. +type DecodeOptions struct { + requireKeyedLiterals + nonComparable + + // AllowDuplicateNames specifies that JSON objects may contain + // duplicate member names. Disabling the duplicate name check may provide + // computational and performance benefits, but breaks compliance with + // RFC 7493, section 2.3. The input will still be compliant with RFC 8259, + // which leaves the handling of duplicate names as unspecified behavior. + AllowDuplicateNames bool + + // AllowInvalidUTF8 specifies that JSON strings may contain invalid UTF-8, + // which will be mangled as the Unicode replacement character, U+FFFD. + // This causes the decoder to break compliance with + // RFC 7493, section 2.1, and RFC 8259, section 8.1. + AllowInvalidUTF8 bool +} + +// Decoder is a streaming decoder for raw JSON tokens and values. +// It is used to read a stream of top-level JSON values, +// each separated by optional whitespace characters. +// +// ReadToken and ReadValue calls may be interleaved. +// For example, the following JSON value: +// +// {"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}} +// +// can be parsed with the following calls (ignoring errors for brevity): +// +// d.ReadToken() // { +// d.ReadToken() // "name" +// d.ReadToken() // "value" +// d.ReadValue() // "array" +// d.ReadToken() // [ +// d.ReadToken() // null +// d.ReadToken() // false +// d.ReadValue() // true +// d.ReadToken() // 3.14159 +// d.ReadToken() // ] +// d.ReadValue() // "object" +// d.ReadValue() // {"k":"v"} +// d.ReadToken() // } +// +// The above is one of many possible sequence of calls and +// may not represent the most sensible method to call for any given token/value. +// For example, it is probably more common to call ReadToken to obtain a +// string token for object names. +type Decoder struct { + state + decodeBuffer + options DecodeOptions + + stringCache *stringCache // only used when unmarshaling +} + +// decodeBuffer is a buffer split into 4 segments: +// +// - buf[0:prevEnd] // already read portion of the buffer +// - buf[prevStart:prevEnd] // previously read value +// - buf[prevEnd:len(buf)] // unread portion of the buffer +// - buf[len(buf):cap(buf)] // unused portion of the buffer +// +// Invariants: +// +// 0 ≤ prevStart ≤ prevEnd ≤ len(buf) ≤ cap(buf) +type decodeBuffer struct { + peekPos int // non-zero if valid offset into buf for start of next token + peekErr error // implies peekPos is -1 + + buf []byte // may alias rd if it is a bytes.Buffer + prevStart int + prevEnd int + + // baseOffset is added to prevStart and prevEnd to obtain + // the absolute offset relative to the start of io.Reader stream. + baseOffset int64 + + rd io.Reader +} + +// NewDecoder constructs a new streaming decoder reading from r. +// +// If r is a bytes.Buffer, then the decoder parses directly from the buffer +// without first copying the contents to an intermediate buffer. +// Additional writes to the buffer must not occur while the decoder is in use. 
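
A minimal sketch of the token/value interleaving described above, assuming the upstream import path: member names and values are both read with ReadValue here, while PeekKind steers the loop without consuming input.

package example

import (
	"fmt"
	"strings"

	// Assumed import path; the copy added in this diff is vendored
	// under an internal, non-importable path.
	json "github.com/go-json-experiment/json"
)

// printMembers walks one top-level JSON object, e.g.
// {"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}}.
func printMembers(src string) error {
	dec := json.NewDecoder(strings.NewReader(src))
	if _, err := dec.ReadToken(); err != nil { // consume '{'
		return err
	}
	for dec.PeekKind() != '}' {
		name, err := dec.ReadValue() // member name, still quoted
		if err != nil {
			return err
		}
		val, err := dec.ReadValue() // whole member value, however nested
		if err != nil {
			return err
		}
		fmt.Printf("%s => %s\n", name, val)
	}
	_, err := dec.ReadToken() // consume '}'
	return err
}
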
+func NewDecoder(r io.Reader) *Decoder { + return DecodeOptions{}.NewDecoder(r) +} + +// NewDecoder constructs a new streaming decoder reading from r +// configured with the provided options. +func (o DecodeOptions) NewDecoder(r io.Reader) *Decoder { + d := new(Decoder) + o.ResetDecoder(d, r) + return d +} + +// ResetDecoder resets a decoder such that it is reading afresh from r and +// configured with the provided options. +func (o DecodeOptions) ResetDecoder(d *Decoder, r io.Reader) { + if d == nil { + panic("json: invalid nil Decoder") + } + if r == nil { + panic("json: invalid nil io.Reader") + } + d.reset(nil, r, o) +} + +func (d *Decoder) reset(b []byte, r io.Reader, o DecodeOptions) { + d.state.reset() + d.decodeBuffer = decodeBuffer{buf: b, rd: r} + d.options = o +} + +// Reset resets a decoder such that it is reading afresh from r but +// keep any pre-existing decoder options. +func (d *Decoder) Reset(r io.Reader) { + d.options.ResetDecoder(d, r) +} + +var errBufferWriteAfterNext = errors.New("invalid bytes.Buffer.Write call after calling bytes.Buffer.Next") + +// fetch reads at least 1 byte from the underlying io.Reader. +// It returns io.ErrUnexpectedEOF if zero bytes were read and io.EOF was seen. +func (d *Decoder) fetch() error { + if d.rd == nil { + return io.ErrUnexpectedEOF + } + + // Inform objectNameStack that we are about to fetch new buffer content. + d.names.copyQuotedBuffer(d.buf) + + // Specialize bytes.Buffer for better performance. + if bb, ok := d.rd.(*bytes.Buffer); ok { + switch { + case bb.Len() == 0: + return io.ErrUnexpectedEOF + case len(d.buf) == 0: + d.buf = bb.Next(bb.Len()) // "read" all data in the buffer + return nil + default: + // This only occurs if a partially filled bytes.Buffer was provided + // and more data is written to it while Decoder is reading from it. + // This practice will lead to data corruption since future writes + // may overwrite the contents of the current buffer. + // + // The user is trying to use a bytes.Buffer as a pipe, + // but a bytes.Buffer is poor implementation of a pipe, + // the purpose-built io.Pipe should be used instead. + return &ioError{action: "read", err: errBufferWriteAfterNext} + } + } + + // Allocate initial buffer if empty. + if cap(d.buf) == 0 { + d.buf = make([]byte, 0, 64) + } + + // Check whether to grow the buffer. + const maxBufferSize = 4 << 10 + const growthSizeFactor = 2 // higher value is faster + const growthRateFactor = 2 // higher value is slower + // By default, grow if below the maximum buffer size. + grow := cap(d.buf) <= maxBufferSize/growthSizeFactor + // Growing can be expensive, so only grow + // if a sufficient number of bytes have been processed. + grow = grow && int64(cap(d.buf)) < d.previousOffsetEnd()/growthRateFactor + // If prevStart==0, then fetch was called in order to fetch more data + // to finish consuming a large JSON value contiguously. + // Grow if less than 25% of the remaining capacity is available. + // Note that this may cause the input buffer to exceed maxBufferSize. + grow = grow || (d.prevStart == 0 && len(d.buf) >= 3*cap(d.buf)/4) + + if grow { + // Allocate a new buffer and copy the contents of the old buffer over. + // TODO: Provide a hard limit on the maximum internal buffer size? + buf := make([]byte, 0, cap(d.buf)*growthSizeFactor) + d.buf = append(buf, d.buf[d.prevStart:]...) + } else { + // Move unread portion of the data to the front. 
+ n := copy(d.buf[:cap(d.buf)], d.buf[d.prevStart:]) + d.buf = d.buf[:n] + } + d.baseOffset += int64(d.prevStart) + d.prevEnd -= d.prevStart + d.prevStart = 0 + + // Read more data into the internal buffer. + for { + n, err := d.rd.Read(d.buf[len(d.buf):cap(d.buf)]) + switch { + case n > 0: + d.buf = d.buf[:len(d.buf)+n] + return nil // ignore errors if any bytes are read + case err == io.EOF: + return io.ErrUnexpectedEOF + case err != nil: + return &ioError{action: "read", err: err} + default: + continue // Read returned (0, nil) + } + } +} + +const invalidateBufferByte = '#' // invalid starting character for JSON grammar + +// invalidatePreviousRead invalidates buffers returned by Peek and Read calls +// so that the first byte is an invalid character. +// This Hyrum-proofs the API against faulty application code that assumes +// values returned by ReadValue remain valid past subsequent Read calls. +func (d *decodeBuffer) invalidatePreviousRead() { + // Avoid mutating the buffer if d.rd is nil which implies that d.buf + // is provided by the user code and may not expect mutations. + isBytesBuffer := func(r io.Reader) bool { + _, ok := r.(*bytes.Buffer) + return ok + } + if d.rd != nil && !isBytesBuffer(d.rd) && d.prevStart < d.prevEnd && uint(d.prevStart) < uint(len(d.buf)) { + d.buf[d.prevStart] = invalidateBufferByte + d.prevStart = d.prevEnd + } +} + +// needMore reports whether there are no more unread bytes. +func (d *decodeBuffer) needMore(pos int) bool { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + return pos == len(d.buf) +} + +// injectSyntacticErrorWithPosition wraps a SyntacticError with the position, +// otherwise it returns the error as is. +// It takes a position relative to the start of the start of d.buf. +func (d *decodeBuffer) injectSyntacticErrorWithPosition(err error, pos int) error { + if serr, ok := err.(*SyntacticError); ok { + return serr.withOffset(d.baseOffset + int64(pos)) + } + return err +} + +func (d *decodeBuffer) previousOffsetStart() int64 { return d.baseOffset + int64(d.prevStart) } +func (d *decodeBuffer) previousOffsetEnd() int64 { return d.baseOffset + int64(d.prevEnd) } +func (d *decodeBuffer) previousBuffer() []byte { return d.buf[d.prevStart:d.prevEnd] } +func (d *decodeBuffer) unreadBuffer() []byte { return d.buf[d.prevEnd:len(d.buf)] } + +// PeekKind retrieves the next token kind, but does not advance the read offset. +// It returns 0 if there are no more tokens. +func (d *Decoder) PeekKind() Kind { + // Check whether we have a cached peek result. + if d.peekPos > 0 { + return Kind(d.buf[d.peekPos]).normalize() + } + + var err error + d.invalidatePreviousRead() + pos := d.prevEnd + + // Consume leading whitespace. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + d.peekPos, d.peekErr = -1, err + return invalidKind + } + } + + // Consume colon or comma. 
+ var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + d.peekPos, d.peekErr = -1, err + return invalidKind + } + } + } + next := Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + err = d.injectSyntacticErrorWithPosition(err, pos) + d.peekPos, d.peekErr = -1, err + return invalidKind + } + + // This may set peekPos to zero, which is indistinguishable from + // the uninitialized state. While a small hit to performance, it is correct + // since ReadValue and ReadToken will disregard the cached result and + // recompute the next kind. + d.peekPos, d.peekErr = pos, nil + return next +} + +// SkipValue is semantically equivalent to calling ReadValue and discarding +// the result except that memory is not wasted trying to hold the entire result. +func (d *Decoder) SkipValue() error { + switch d.PeekKind() { + case '{', '[': + // For JSON objects and arrays, keep skipping all tokens + // until the depth matches the starting depth. + depth := d.tokens.depth() + for { + if _, err := d.ReadToken(); err != nil { + return err + } + if depth >= d.tokens.depth() { + return nil + } + } + default: + // Trying to skip a value when the next token is a '}' or ']' + // will result in an error being returned here. + if _, err := d.ReadValue(); err != nil { + return err + } + return nil + } +} + +// ReadToken reads the next Token, advancing the read offset. +// The returned token is only valid until the next Peek, Read, or Skip call. +// It returns io.EOF if there are no more tokens. +func (d *Decoder) ReadToken() (Token, error) { + // Determine the next kind. + var err error + var next Kind + pos := d.peekPos + if pos != 0 { + // Use cached peek result. + if d.peekErr != nil { + err := d.peekErr + d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error + return Token{}, err + } + next = Kind(d.buf[pos]).normalize() + d.peekPos = 0 // reset cache + } else { + d.invalidatePreviousRead() + pos = d.prevEnd + + // Consume leading whitespace. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + return Token{}, err + } + } + + // Consume colon or comma. + var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return Token{}, err + } + } + } + next = Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } + + // Handle the next token. 
+ var n int + switch next { + case 'n': + if consumeNull(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "null") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("null") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("null")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return Null, nil + + case 'f': + if consumeFalse(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "false") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("false") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("false")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return False, nil + + case 't': + if consumeTrue(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "true") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("true") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("true")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return True, nil + + case '"': + var flags valueFlags // TODO: Preserve this in Token? + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeString(&flags, pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += n + } + if !d.options.AllowDuplicateNames && d.tokens.last.needObjectName() { + if !d.tokens.last.isValidNamespace() { + return Token{}, errInvalidNamespace + } + if d.tokens.last.isActiveNamespace() && !d.namespaces.last().insertQuoted(d.buf[pos-n:pos], flags.isVerbatim()) { + err = &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of string + } + d.names.replaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds + } + if err = d.tokens.appendString(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of string + } + d.prevStart, d.prevEnd = pos-n, pos + return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil + + case '0': + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. 
+ if n = consumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeNumber(pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += n + } + if err = d.tokens.appendNumber(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of number + } + d.prevStart, d.prevEnd = pos-n, pos + return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil + + case '{': + if err = d.tokens.pushObject(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + if !d.options.AllowDuplicateNames { + d.names.push() + d.namespaces.push() + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ObjectStart, nil + + case '}': + if err = d.tokens.popObject(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + if !d.options.AllowDuplicateNames { + d.names.pop() + d.namespaces.pop() + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ObjectEnd, nil + + case '[': + if err = d.tokens.pushArray(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ArrayStart, nil + + case ']': + if err = d.tokens.popArray(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ArrayEnd, nil + + default: + err = newInvalidCharacterError(d.buf[pos:], "at start of token") + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } +} + +type valueFlags uint + +const ( + _ valueFlags = (1 << iota) / 2 // powers of two starting with zero + + stringNonVerbatim // string cannot be naively treated as valid UTF-8 + stringNonCanonical // string not formatted according to RFC 8785, section 3.2.2.2. + // TODO: Track whether a number is a non-integer? +) + +func (f *valueFlags) set(f2 valueFlags) { *f |= f2 } +func (f valueFlags) isVerbatim() bool { return f&stringNonVerbatim == 0 } +func (f valueFlags) isCanonical() bool { return f&stringNonCanonical == 0 } + +// ReadValue returns the next raw JSON value, advancing the read offset. +// The value is stripped of any leading or trailing whitespace. +// The returned value is only valid until the next Peek, Read, or Skip call and +// may not be mutated while the Decoder remains in use. +// If the decoder is currently at the end token for an object or array, +// then it reports a SyntacticError and the internal state remains unchanged. +// It returns io.EOF if there are no more values. +func (d *Decoder) ReadValue() (RawValue, error) { + var flags valueFlags + return d.readValue(&flags) +} +func (d *Decoder) readValue(flags *valueFlags) (RawValue, error) { + // Determine the next kind. + var err error + var next Kind + pos := d.peekPos + if pos != 0 { + // Use cached peek result. + if d.peekErr != nil { + err := d.peekErr + d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error + return nil, err + } + next = Kind(d.buf[pos]).normalize() + d.peekPos = 0 // reset cache + } else { + d.invalidatePreviousRead() + pos = d.prevEnd + + // Consume leading whitespace. 
+ pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + return nil, err + } + } + + // Consume colon or comma. + var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return nil, err + } + } + } + next = Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + return nil, d.injectSyntacticErrorWithPosition(err, pos) + } + } + + // Handle the next value. + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeValue(flags, pos) + newAbsPos := d.baseOffset + int64(pos) + n := int(newAbsPos - oldAbsPos) + if err != nil { + return nil, d.injectSyntacticErrorWithPosition(err, pos) + } + switch next { + case 'n', 't', 'f': + err = d.tokens.appendLiteral() + case '"': + if !d.options.AllowDuplicateNames && d.tokens.last.needObjectName() { + if !d.tokens.last.isValidNamespace() { + err = errInvalidNamespace + break + } + if d.tokens.last.isActiveNamespace() && !d.namespaces.last().insertQuoted(d.buf[pos-n:pos], flags.isVerbatim()) { + err = &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + break + } + d.names.replaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds + } + err = d.tokens.appendString() + case '0': + err = d.tokens.appendNumber() + case '{': + if err = d.tokens.pushObject(); err != nil { + break + } + if err = d.tokens.popObject(); err != nil { + panic("BUG: popObject should never fail immediately after pushObject: " + err.Error()) + } + case '[': + if err = d.tokens.pushArray(); err != nil { + break + } + if err = d.tokens.popArray(); err != nil { + panic("BUG: popArray should never fail immediately after pushArray: " + err.Error()) + } + } + if err != nil { + return nil, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of value + } + d.prevEnd = pos + d.prevStart = pos - n + return d.buf[pos-n : pos : pos], nil +} + +// checkEOF verifies that the input has no more data. +func (d *Decoder) checkEOF() error { + switch pos, err := d.consumeWhitespace(d.prevEnd); err { + case nil: + return newInvalidCharacterError(d.buf[pos:], "after top-level value") + case io.ErrUnexpectedEOF: + return nil + default: + return err + } +} + +// consumeWhitespace consumes all whitespace starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the last whitespace. +// If it returns nil, there is guaranteed to at least be one unread byte. +// +// The following pattern is common in this implementation: +// +// pos += consumeWhitespace(d.buf[pos:]) +// if d.needMore(pos) { +// if pos, err = d.consumeWhitespace(pos); err != nil { +// return ... +// } +// } +// +// It is difficult to simplify this without sacrificing performance since +// consumeWhitespace must be inlined. The body of the if statement is +// executed only in rare situations where we need to fetch more data. +// Since fetching may return an error, we also need to check the error. 
+func (d *Decoder) consumeWhitespace(pos int) (newPos int, err error) { + for { + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos, nil + } +} + +// consumeValue consumes a single JSON value starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the value. +func (d *Decoder) consumeValue(flags *valueFlags, pos int) (newPos int, err error) { + for { + var n int + var err error + switch next := Kind(d.buf[pos]).normalize(); next { + case 'n': + if n = consumeNull(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "null") + } + case 'f': + if n = consumeFalse(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "false") + } + case 't': + if n = consumeTrue(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "true") + } + case '"': + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + return d.consumeString(flags, pos) + } + case '0': + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. + if n = consumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) { + return d.consumeNumber(pos) + } + case '{': + return d.consumeObject(flags, pos) + case '[': + return d.consumeArray(flags, pos) + default: + return pos, newInvalidCharacterError(d.buf[pos:], "at start of value") + } + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeLiteral consumes a single JSON literal starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the literal. +func (d *Decoder) consumeLiteral(pos int, lit string) (newPos int, err error) { + for { + n, err := consumeLiteral(d.buf[pos:], lit) + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeString consumes a single JSON string starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the string. +func (d *Decoder) consumeString(flags *valueFlags, pos int) (newPos int, err error) { + var n int + for { + n, err = consumeStringResumable(flags, d.buf[pos:], n, !d.options.AllowInvalidUTF8) + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeNumber consumes a single JSON number starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the number. +func (d *Decoder) consumeNumber(pos int) (newPos int, err error) { + var n int + var state consumeNumberState + for { + n, state, err = consumeNumberResumable(d.buf[pos:], n, state) + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. 
+ if err == io.ErrUnexpectedEOF || d.needMore(pos+n) { + mayTerminate := err == nil + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + if mayTerminate && err == io.ErrUnexpectedEOF { + return pos + n, nil + } + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeObject consumes a single JSON object starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the object. +func (d *Decoder) consumeObject(flags *valueFlags, pos int) (newPos int, err error) { + var n int + var names *objectNamespace + if !d.options.AllowDuplicateNames { + d.namespaces.push() + defer d.namespaces.pop() + names = d.namespaces.last() + } + + // Handle before start. + if d.buf[pos] != '{' { + panic("BUG: consumeObject must be called with a buffer that starts with '{'") + } + pos++ + + // Handle after start. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] == '}' { + pos++ + return pos, nil + } + + for { + // Handle before name. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + var flags2 valueFlags + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeString(&flags2, pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + flags.set(flags2) + if err != nil { + return pos, err + } + } else { + pos += n + } + if !d.options.AllowDuplicateNames && !names.insertQuoted(d.buf[pos-n:pos], flags2.isVerbatim()) { + return pos - n, &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + } + + // Handle after name. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] != ':' { + return pos, newInvalidCharacterError(d.buf[pos:], "after object name (expecting ':')") + } + pos++ + + // Handle before value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + pos, err = d.consumeValue(flags, pos) + if err != nil { + return pos, err + } + + // Handle after value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + switch d.buf[pos] { + case ',': + pos++ + continue + case '}': + pos++ + return pos, nil + default: + return pos, newInvalidCharacterError(d.buf[pos:], "after object value (expecting ',' or '}')") + } + } +} + +// consumeArray consumes a single JSON array starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the array. +func (d *Decoder) consumeArray(flags *valueFlags, pos int) (newPos int, err error) { + // Handle before start. + if d.buf[pos] != '[' { + panic("BUG: consumeArray must be called with a buffer that starts with '['") + } + pos++ + + // Handle after start. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] == ']' { + pos++ + return pos, nil + } + + for { + // Handle before value. 
+ pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + pos, err = d.consumeValue(flags, pos) + if err != nil { + return pos, err + } + + // Handle after value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + switch d.buf[pos] { + case ',': + pos++ + continue + case ']': + pos++ + return pos, nil + default: + return pos, newInvalidCharacterError(d.buf[pos:], "after array value (expecting ',' or ']')") + } + } +} + +// InputOffset returns the current input byte offset. It gives the location +// of the next byte immediately after the most recently returned token or value. +// The number of bytes actually read from the underlying io.Reader may be more +// than this offset due to internal buffering effects. +func (d *Decoder) InputOffset() int64 { + return d.previousOffsetEnd() +} + +// UnreadBuffer returns the data remaining in the unread buffer, +// which may contain zero or more bytes. +// The returned buffer must not be mutated while Decoder continues to be used. +// The buffer contents are valid until the next Peek, Read, or Skip call. +func (d *Decoder) UnreadBuffer() []byte { + return d.unreadBuffer() +} + +// StackDepth returns the depth of the state machine for read JSON data. +// Each level on the stack represents a nested JSON object or array. +// It is incremented whenever an ObjectStart or ArrayStart token is encountered +// and decremented whenever an ObjectEnd or ArrayEnd token is encountered. +// The depth is zero-indexed, where zero represents the top-level JSON value. +func (d *Decoder) StackDepth() int { + // NOTE: Keep in sync with Encoder.StackDepth. + return d.tokens.depth() - 1 +} + +// StackIndex returns information about the specified stack level. +// It must be a number between 0 and StackDepth, inclusive. +// For each level, it reports the kind: +// +// - 0 for a level of zero, +// - '{' for a level representing a JSON object, and +// - '[' for a level representing a JSON array. +// +// It also reports the length of that JSON object or array. +// Each name and value in a JSON object is counted separately, +// so the effective number of members would be half the length. +// A complete JSON object must have an even length. +func (d *Decoder) StackIndex(i int) (Kind, int) { + // NOTE: Keep in sync with Encoder.StackIndex. + switch s := d.tokens.index(i); { + case i > 0 && s.isObject(): + return '{', s.length() + case i > 0 && s.isArray(): + return '[', s.length() + default: + return 0, s.length() + } +} + +// StackPointer returns a JSON Pointer (RFC 6901) to the most recently read value. +// Object names are only present if AllowDuplicateNames is false, otherwise +// object members are represented using their index within the object. +func (d *Decoder) StackPointer() string { + d.names.copyQuotedBuffer(d.buf) + return string(d.appendStackPointer(nil)) +} + +// consumeWhitespace consumes leading JSON whitespace per RFC 7159, section 2. +func consumeWhitespace(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + for len(b) > n && (b[n] == ' ' || b[n] == '\t' || b[n] == '\r' || b[n] == '\n') { + n++ + } + return n +} + +// consumeNull consumes the next JSON null literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. 
+func consumeNull(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "null" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeFalse consumes the next JSON false literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. +func consumeFalse(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "false" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeTrue consumes the next JSON true literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. +func consumeTrue(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "true" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeLiteral consumes the next JSON literal per RFC 7159, section 3. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +func consumeLiteral(b []byte, lit string) (n int, err error) { + for i := 0; i < len(b) && i < len(lit); i++ { + if b[i] != lit[i] { + return i, newInvalidCharacterError(b[i:], "within literal "+lit+" (expecting "+strconv.QuoteRune(rune(lit[i]))+")") + } + } + if len(b) < len(lit) { + return len(b), io.ErrUnexpectedEOF + } + return len(lit), nil +} + +// consumeSimpleString consumes the next JSON string per RFC 7159, section 7 +// but is limited to the grammar for an ASCII string without escape sequences. +// It returns 0 if it is invalid or more complicated than a simple string, +// in which case consumeString should be called. +func consumeSimpleString(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[0] == '"' { + n++ + for len(b) > n && (' ' <= b[n] && b[n] != '\\' && b[n] != '"' && b[n] < utf8.RuneSelf) { + n++ + } + if len(b) > n && b[n] == '"' { + n++ + return n + } + } + return 0 +} + +// consumeString consumes the next JSON string per RFC 7159, section 7. +// If validateUTF8 is false, then this allows the presence of invalid UTF-8 +// characters within the string itself. +// It reports the number of bytes consumed and whether an error was encountered. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +func consumeString(flags *valueFlags, b []byte, validateUTF8 bool) (n int, err error) { + return consumeStringResumable(flags, b, 0, validateUTF8) +} + +// consumeStringResumable is identical to consumeString but supports resuming +// from a previous call that returned io.ErrUnexpectedEOF. +func consumeStringResumable(flags *valueFlags, b []byte, resumeOffset int, validateUTF8 bool) (n int, err error) { + // Consume the leading double quote. + switch { + case resumeOffset > 0: + n = resumeOffset // already handled the leading quote + case uint(len(b)) == 0: + return n, io.ErrUnexpectedEOF + case b[0] == '"': + n++ + default: + return n, newInvalidCharacterError(b[n:], `at start of string (expecting '"')`) + } + + // Consume every character in the string. + for uint(len(b)) > uint(n) { + // Optimize for long sequences of unescaped characters. 
+ noEscape := func(c byte) bool { + return c < utf8.RuneSelf && ' ' <= c && c != '\\' && c != '"' + } + for uint(len(b)) > uint(n) && noEscape(b[n]) { + n++ + } + if uint(len(b)) <= uint(n) { + return n, io.ErrUnexpectedEOF + } + + // Check for terminating double quote. + if b[n] == '"' { + n++ + return n, nil + } + + switch r, rn := utf8.DecodeRune(b[n:]); { + // Handle UTF-8 encoded byte sequence. + // Due to specialized handling of ASCII above, we know that + // all normal sequences at this point must be 2 bytes or larger. + case rn > 1: + n += rn + // Handle escape sequence. + case r == '\\': + flags.set(stringNonVerbatim) + resumeOffset = n + if uint(len(b)) < uint(n+2) { + return resumeOffset, io.ErrUnexpectedEOF + } + switch r := b[n+1]; r { + case '/': + // Forward slash is the only character with 3 representations. + // Per RFC 8785, section 3.2.2.2., this must not be escaped. + flags.set(stringNonCanonical) + n += 2 + case '"', '\\', 'b', 'f', 'n', 'r', 't': + n += 2 + case 'u': + if uint(len(b)) < uint(n+6) { + if !hasEscapeSequencePrefix(b[n:]) { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:])) + " within string"} + } + return resumeOffset, io.ErrUnexpectedEOF + } + v1, ok := parseHexUint16(b[n+2 : n+6]) + if !ok { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} + } + // Only certain control characters can use the \uFFFF notation + // for canonical formatting (per RFC 8785, section 3.2.2.2.). + switch v1 { + // \uFFFF notation not permitted for these characters. + case '\b', '\f', '\n', '\r', '\t': + flags.set(stringNonCanonical) + default: + // \uFFFF notation only permitted for control characters. + if v1 >= ' ' { + flags.set(stringNonCanonical) + } else { + // \uFFFF notation must be lower case. + for _, c := range b[n+2 : n+6] { + if 'A' <= c && c <= 'F' { + flags.set(stringNonCanonical) + } + } + } + } + n += 6 + + if validateUTF8 && utf16.IsSurrogate(rune(v1)) { + if uint(len(b)) >= uint(n+2) && (b[n] != '\\' || b[n+1] != 'u') { + return n, &SyntacticError{str: "invalid unpaired surrogate half within string"} + } + if uint(len(b)) < uint(n+6) { + if !hasEscapeSequencePrefix(b[n:]) { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:])) + " within string"} + } + return resumeOffset, io.ErrUnexpectedEOF + } + v2, ok := parseHexUint16(b[n+2 : n+6]) + if !ok { + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} + } + if utf16.DecodeRune(rune(v1), rune(v2)) == utf8.RuneError { + return n, &SyntacticError{str: "invalid surrogate pair in string"} + } + n += 6 + } + default: + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+2])) + " within string"} + } + // Handle invalid UTF-8. + case r == utf8.RuneError: + if !utf8.FullRune(b[n:]) { + return n, io.ErrUnexpectedEOF + } + flags.set(stringNonVerbatim | stringNonCanonical) + if validateUTF8 { + return n, &SyntacticError{str: "invalid UTF-8 within string"} + } + n++ + // Handle invalid control characters. 
+ case r < ' ': + flags.set(stringNonVerbatim | stringNonCanonical) + return n, newInvalidCharacterError(b[n:], "within string (expecting non-control character)") + default: + panic("BUG: unhandled character " + quoteRune(b[n:])) + } + } + return n, io.ErrUnexpectedEOF +} + +// hasEscapeSequencePrefix reports whether b is possibly +// the truncated prefix of a \uFFFF escape sequence. +func hasEscapeSequencePrefix(b []byte) bool { + for i, c := range b { + switch { + case i == 0 && c != '\\': + return false + case i == 1 && c != 'u': + return false + case i >= 2 && i < 6 && !('0' <= c && c <= '9') && !('a' <= c && c <= 'f') && !('A' <= c && c <= 'F'): + return false + } + } + return true +} + +// unescapeString appends the unescaped form of a JSON string in src to dst. +// Any invalid UTF-8 within the string will be replaced with utf8.RuneError. +// The input must be an entire JSON string with no surrounding whitespace. +func unescapeString(dst, src []byte) (v []byte, ok bool) { + // Consume leading double quote. + if uint(len(src)) == 0 || src[0] != '"' { + return dst, false + } + i, n := 1, 1 + + // Consume every character until completion. + for uint(len(src)) > uint(n) { + // Optimize for long sequences of unescaped characters. + noEscape := func(c byte) bool { + return c < utf8.RuneSelf && ' ' <= c && c != '\\' && c != '"' + } + for uint(len(src)) > uint(n) && noEscape(src[n]) { + n++ + } + if uint(len(src)) <= uint(n) { + break + } + + // Check for terminating double quote. + if src[n] == '"' { + dst = append(dst, src[i:n]...) + n++ + return dst, len(src) == n + } + + switch r, rn := utf8.DecodeRune(src[n:]); { + // Handle UTF-8 encoded byte sequence. + // Due to specialized handling of ASCII above, we know that + // all normal sequences at this point must be 2 bytes or larger. + case rn > 1: + n += rn + // Handle escape sequence. + case r == '\\': + dst = append(dst, src[i:n]...) + if r < ' ' { + return dst, false // invalid control character or unescaped quote + } + + // Handle escape sequence. + if uint(len(src)) < uint(n+2) { + return dst, false // truncated escape sequence + } + switch r := src[n+1]; r { + case '"', '\\', '/': + dst = append(dst, r) + n += 2 + case 'b': + dst = append(dst, '\b') + n += 2 + case 'f': + dst = append(dst, '\f') + n += 2 + case 'n': + dst = append(dst, '\n') + n += 2 + case 'r': + dst = append(dst, '\r') + n += 2 + case 't': + dst = append(dst, '\t') + n += 2 + case 'u': + if uint(len(src)) < uint(n+6) { + return dst, false // truncated escape sequence + } + v1, ok := parseHexUint16(src[n+2 : n+6]) + if !ok { + return dst, false // invalid escape sequence + } + n += 6 + + // Check whether this is a surrogate half. + r := rune(v1) + if utf16.IsSurrogate(r) { + r = utf8.RuneError // assume failure unless the following succeeds + if uint(len(src)) >= uint(n+6) && src[n+0] == '\\' && src[n+1] == 'u' { + if v2, ok := parseHexUint16(src[n+2 : n+6]); ok { + if r = utf16.DecodeRune(rune(v1), rune(v2)); r != utf8.RuneError { + n += 6 + } + } + } + } + + dst = utf8.AppendRune(dst, r) + default: + return dst, false // invalid escape sequence + } + i = n + // Handle invalid UTF-8. + case r == utf8.RuneError: + // NOTE: An unescaped string may be longer than the escaped string + // because invalid UTF-8 bytes are being replaced. + dst = append(dst, src[i:n]...) + dst = append(dst, "\uFFFD"...) + n += rn + i = n + // Handle invalid control characters. + case r < ' ': + dst = append(dst, src[i:n]...) 
+ return dst, false // invalid control character or unescaped quote + default: + panic("BUG: unhandled character " + quoteRune(src[n:])) + } + } + dst = append(dst, src[i:n]...) + return dst, false // truncated input +} + +// unescapeStringMayCopy returns the unescaped form of b. +// If there are no escaped characters, the output is simply a subslice of +// the input with the surrounding quotes removed. +// Otherwise, a new buffer is allocated for the output. +func unescapeStringMayCopy(b []byte, isVerbatim bool) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if isVerbatim { + return b[len(`"`) : len(b)-len(`"`)] + } + b, _ = unescapeString(make([]byte, 0, len(b)), b) + return b +} + +// consumeSimpleNumber consumes the next JSON number per RFC 7159, section 6 +// but is limited to the grammar for a positive integer. +// It returns 0 if it is invalid or more complicated than a simple integer, +// in which case consumeNumber should be called. +func consumeSimpleNumber(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 { + if b[0] == '0' { + n++ + } else if '1' <= b[0] && b[0] <= '9' { + n++ + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + } else { + return 0 + } + if len(b) == n || !(b[n] == '.' || b[n] == 'e' || b[n] == 'E') { + return n + } + } + return 0 +} + +type consumeNumberState uint + +const ( + consumeNumberInit consumeNumberState = iota + beforeIntegerDigits + withinIntegerDigits + beforeFractionalDigits + withinFractionalDigits + beforeExponentDigits + withinExponentDigits +) + +// consumeNumber consumes the next JSON number per RFC 7159, section 6. +// It reports the number of bytes consumed and whether an error was encountered. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +// +// Note that JSON numbers are not self-terminating. +// If the entire input is consumed, then the caller needs to consider whether +// there may be subsequent unread data that may still be part of this number. +func consumeNumber(b []byte) (n int, err error) { + n, _, err = consumeNumberResumable(b, 0, consumeNumberInit) + return n, err +} + +// consumeNumberResumable is identical to consumeNumber but supports resuming +// from a previous call that returned io.ErrUnexpectedEOF. +func consumeNumberResumable(b []byte, resumeOffset int, state consumeNumberState) (n int, _ consumeNumberState, err error) { + // Jump to the right state when resuming from a partial consumption. + n = resumeOffset + if state > consumeNumberInit { + switch state { + case withinIntegerDigits, withinFractionalDigits, withinExponentDigits: + // Consume leading digits. + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + if len(b) == n { + return n, state, nil // still within the same state + } + state++ // switches "withinX" to "beforeY" where Y is the state after X + } + switch state { + case beforeIntegerDigits: + goto beforeInteger + case beforeFractionalDigits: + goto beforeFractional + case beforeExponentDigits: + goto beforeExponent + default: + return n, state, nil + } + } + + // Consume required integer component (with optional minus sign). 
+beforeInteger: + resumeOffset = n + if len(b) > 0 && b[0] == '-' { + n++ + } + switch { + case len(b) == n: + return resumeOffset, beforeIntegerDigits, io.ErrUnexpectedEOF + case b[n] == '0': + n++ + state = beforeFractionalDigits + case '1' <= b[n] && b[n] <= '9': + n++ + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinIntegerDigits + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + + // Consume optional fractional component. +beforeFractional: + if len(b) > n && b[n] == '.' { + resumeOffset = n + n++ + switch { + case len(b) == n: + return resumeOffset, beforeFractionalDigits, io.ErrUnexpectedEOF + case '0' <= b[n] && b[n] <= '9': + n++ + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinFractionalDigits + } + + // Consume optional exponent component. +beforeExponent: + if len(b) > n && (b[n] == 'e' || b[n] == 'E') { + resumeOffset = n + n++ + if len(b) > n && (b[n] == '-' || b[n] == '+') { + n++ + } + switch { + case len(b) == n: + return resumeOffset, beforeExponentDigits, io.ErrUnexpectedEOF + case '0' <= b[n] && b[n] <= '9': + n++ + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinExponentDigits + } + + return n, state, nil +} + +// parseHexUint16 is similar to strconv.ParseUint, +// but operates directly on []byte and is optimized for base-16. +// See https://go.dev/issue/42429. +func parseHexUint16(b []byte) (v uint16, ok bool) { + if len(b) != 4 { + return 0, false + } + for _, c := range b[:4] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = 10 + c - 'a' + case 'A' <= c && c <= 'F': + c = 10 + c - 'A' + default: + return 0, false + } + v = v*16 + uint16(c) + } + return v, true +} + +// parseDecUint is similar to strconv.ParseUint, +// but operates directly on []byte and is optimized for base-10. +// If the number is syntactically valid but overflows uint64, +// then it returns (math.MaxUint64, false). +// See https://go.dev/issue/42429. +func parseDecUint(b []byte) (v uint64, ok bool) { + // Overflow logic is based on strconv/atoi.go:138-149 from Go1.15, where: + // - cutoff is equal to math.MaxUint64/10+1, and + // - the n1 > maxVal check is unnecessary + // since maxVal is equivalent to math.MaxUint64. + var n int + var overflow bool + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + overflow = overflow || v >= math.MaxUint64/10+1 + v *= 10 + + v1 := v + uint64(b[n]-'0') + overflow = overflow || v1 < v + v = v1 + + n++ + } + if n == 0 || len(b) != n { + return 0, false + } + if overflow { + return math.MaxUint64, false + } + return v, true +} + +// parseFloat parses a floating point number according to the Go float grammar. +// Note that the JSON number grammar is a strict subset. +// +// If the number overflows the finite representation of a float, +// then we return MaxFloat since any finite value will always be infinitely +// more accurate at representing another finite value than an infinite value. +func parseFloat(b []byte, bits int) (v float64, ok bool) { + // Fast path for exact integer numbers which fit in the + // 24-bit or 53-bit significand of a float32 or float64. 
+ var negLen int // either 0 or 1 + if len(b) > 0 && b[0] == '-' { + negLen = 1 + } + u, ok := parseDecUint(b[negLen:]) + if ok && ((bits == 32 && u <= 1<<24) || (bits == 64 && u <= 1<<53)) { + return math.Copysign(float64(u), float64(-1*negLen)), true + } + + // Note that the []byte->string conversion unfortunately allocates. + // See https://go.dev/issue/42429 for more information. + fv, err := strconv.ParseFloat(string(b), bits) + if math.IsInf(fv, 0) { + switch { + case bits == 32 && math.IsInf(fv, +1): + return +math.MaxFloat32, true + case bits == 64 && math.IsInf(fv, +1): + return +math.MaxFloat64, true + case bits == 32 && math.IsInf(fv, -1): + return -math.MaxFloat32, true + case bits == 64 && math.IsInf(fv, -1): + return -math.MaxFloat64, true + } + } + return fv, err == nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go new file mode 100644 index 000000000..e4eefa3de --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go @@ -0,0 +1,182 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements serialization of JSON +// as specified in RFC 4627, RFC 7159, RFC 7493, RFC 8259, and RFC 8785. +// JSON is a simple data interchange format that can represent +// primitive data types such as booleans, strings, and numbers, +// in addition to structured data types such as objects and arrays. +// +// # Terminology +// +// This package uses the terms "encode" and "decode" for syntactic functionality +// that is concerned with processing JSON based on its grammar, and +// uses the terms "marshal" and "unmarshal" for semantic functionality +// that determines the meaning of JSON values as Go values and vice-versa. +// It aims to provide a clear distinction between functionality that +// is purely concerned with encoding versus that of marshaling. +// For example, one can directly encode a stream of JSON tokens without +// needing to marshal a concrete Go value representing them. +// Similarly, one can decode a stream of JSON tokens without +// needing to unmarshal them into a concrete Go value. +// +// This package uses JSON terminology when discussing JSON, which may differ +// from related concepts in Go or elsewhere in computing literature. +// +// - A JSON "object" refers to an unordered collection of name/value members. +// - A JSON "array" refers to an ordered sequence of elements. +// - A JSON "value" refers to either a literal (i.e., null, false, or true), +// string, number, object, or array. +// +// See RFC 8259 for more information. +// +// # Specifications +// +// Relevant specifications include RFC 4627, RFC 7159, RFC 7493, RFC 8259, +// and RFC 8785. Each RFC is generally a stricter subset of another RFC. +// In increasing order of strictness: +// +// - RFC 4627 and RFC 7159 do not require (but recommend) the use of UTF-8 +// and also do not require (but recommend) that object names be unique. +// - RFC 8259 requires the use of UTF-8, +// but does not require (but recommends) that object names be unique. +// - RFC 7493 requires the use of UTF-8 +// and also requires that object names be unique. +// - RFC 8785 defines a canonical representation. It requires the use of UTF-8 +// and also requires that object names be unique and in a specific ordering. 
+// It specifies exactly how strings and numbers must be formatted. +// +// The primary difference between RFC 4627 and RFC 7159 is that the former +// restricted top-level values to only JSON objects and arrays, while +// RFC 7159 and subsequent RFCs permit top-level values to additionally be +// JSON nulls, booleans, strings, or numbers. +// +// By default, this package operates on RFC 7493, but can be configured +// to operate according to the other RFC specifications. +// RFC 7493 is a stricter subset of RFC 8259 and fully compliant with it. +// In particular, it makes specific choices about behavior that RFC 8259 +// leaves as undefined in order to ensure greater interoperability. +// +// # JSON Representation of Go structs +// +// A Go struct is naturally represented as a JSON object, +// where each Go struct field corresponds with a JSON object member. +// When marshaling, all Go struct fields are recursively encoded in depth-first +// order as JSON object members except those that are ignored or omitted. +// When unmarshaling, JSON object members are recursively decoded +// into the corresponding Go struct fields. +// Object members that do not match any struct fields, +// also known as “unknown members”, are ignored by default or rejected +// if UnmarshalOptions.RejectUnknownMembers is specified. +// +// The representation of each struct field can be customized in the +// "json" struct field tag, where the tag is a comma separated list of options. +// As a special case, if the entire tag is `json:"-"`, +// then the field is ignored with regard to its JSON representation. +// +// The first option is the JSON object name override for the Go struct field. +// If the name is not specified, then the Go struct field name +// is used as the JSON object name. JSON names containing commas or quotes, +// or names identical to "" or "-", can be specified using +// a single-quoted string literal, where the syntax is identical to +// the Go grammar for a double-quoted string literal, +// but instead uses single quotes as the delimiters. +// By default, unmarshaling uses case-sensitive matching to identify +// the Go struct field associated with a JSON object name. +// +// After the name, the following tag options are supported: +// +// - omitzero: When marshaling, the "omitzero" option specifies that +// the struct field should be omitted if the field value is zero +// as determined by the "IsZero() bool" method if present, +// otherwise based on whether the field is the zero Go value. +// This option has no effect when unmarshaling. +// +// - omitempty: When marshaling, the "omitempty" option specifies that +// the struct field should be omitted if the field value would have been +// encoded as a JSON null, empty string, empty object, or empty array. +// This option has no effect when unmarshaling. +// +// - string: The "string" option specifies that +// MarshalOptions.StringifyNumbers and UnmarshalOptions.StringifyNumbers +// be set when marshaling or unmarshaling a struct field value. +// This causes numeric types to be encoded as a JSON number +// within a JSON string, and to be decoded from either a JSON number or +// a JSON string containing a JSON number. +// This extra level of encoding is often necessary since +// many JSON parsers cannot precisely represent 64-bit integers. 
+// +// - nocase: When unmarshaling, the "nocase" option specifies that +// if the JSON object name does not exactly match the JSON name +// for any of the struct fields, then it attempts to match the struct field +// using a case-insensitive match that also ignores dashes and underscores. +// If multiple fields match, the first declared field in breadth-first order +// takes precedence. This option has no effect when marshaling. +// +// - inline: The "inline" option specifies that +// the JSON representable content of this field type is to be promoted +// as if they were specified in the parent struct. +// It is the JSON equivalent of Go struct embedding. +// A Go embedded field is implicitly inlined unless an explicit JSON name +// is specified. The inlined field must be a Go struct +// (that does not implement any JSON methods), RawValue, map[string]T, +// or an unnamed pointer to such types. When marshaling, +// inlined fields from a pointer type are omitted if it is nil. +// Inlined fields of type RawValue and map[string]T are called +// “inlined fallbacks” as they can represent all possible +// JSON object members not directly handled by the parent struct. +// Only one inlined fallback field may be specified in a struct, +// while many non-fallback fields may be specified. This option +// must not be specified with any other option (including the JSON name). +// +// - unknown: The "unknown" option is a specialized variant +// of the inlined fallback to indicate that this Go struct field +// contains any number of unknown JSON object members. The field type +// must be a RawValue, map[string]T, or an unnamed pointer to such types. +// If MarshalOptions.DiscardUnknownMembers is specified when marshaling, +// the contents of this field are ignored. +// If UnmarshalOptions.RejectUnknownMembers is specified when unmarshaling, +// any unknown object members are rejected regardless of whether +// an inlined fallback with the "unknown" option exists. This option +// must not be specified with any other option (including the JSON name). +// +// - format: The "format" option specifies a format flag +// used to specialize the formatting of the field value. +// The option is a key-value pair specified as "format:value" where +// the value must be either a literal consisting of letters and numbers +// (e.g., "format:RFC3339") or a single-quoted string literal +// (e.g., "format:'2006-01-02'"). The interpretation of the format flag +// is determined by the struct field type. +// +// The "omitzero" and "omitempty" options are mostly semantically identical. +// The former is defined in terms of the Go type system, +// while the latter in terms of the JSON type system. +// Consequently they behave differently in some circumstances. +// For example, only a nil slice or map is omitted under "omitzero", while +// an empty slice or map is omitted under "omitempty" regardless of nilness. +// The "omitzero" option is useful for types with a well-defined zero value +// (e.g., netip.Addr) or have an IsZero method (e.g., time.Time). +// +// Every Go struct corresponds to a list of JSON representable fields +// which is constructed by performing a breadth-first search over +// all struct fields (excluding unexported or ignored fields), +// where the search recursively descends into inlined structs. +// The set of non-inlined fields in a struct must have unique JSON names. 
+// If multiple fields all have the same JSON name, then the one +// at shallowest depth takes precedence and the other fields at deeper depths +// are excluded from the list of JSON representable fields. +// If multiple fields at the shallowest depth have the same JSON name, +// then all of those fields are excluded from the list. This is analogous to +// Go visibility rules for struct field selection with embedded struct types. +// +// Marshaling or unmarshaling a non-empty struct +// without any JSON representable fields results in a SemanticError. +// Unexported fields must not have any `json` tags except for `json:"-"`. +package json + +// requireKeyedLiterals can be embedded in a struct to require keyed literals. +type requireKeyedLiterals struct{} + +// nonComparable can be embedded in a struct to prevent comparability. +type nonComparable [0]func() diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go new file mode 100644 index 000000000..5b81ca15a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go @@ -0,0 +1,1170 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "io" + "math" + "math/bits" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +// EncodeOptions configures how JSON encoding operates. +// The zero value is equivalent to the default settings, +// which is compliant with both RFC 7493 and RFC 8259. +type EncodeOptions struct { + requireKeyedLiterals + nonComparable + + // multiline specifies whether the encoder should emit multiline output. + multiline bool + + // omitTopLevelNewline specifies whether to omit the newline + // that is appended after every top-level JSON value when streaming. + omitTopLevelNewline bool + + // AllowDuplicateNames specifies that JSON objects may contain + // duplicate member names. Disabling the duplicate name check may provide + // performance benefits, but breaks compliance with RFC 7493, section 2.3. + // The output will still be compliant with RFC 8259, + // which leaves the handling of duplicate names as unspecified behavior. + AllowDuplicateNames bool + + // AllowInvalidUTF8 specifies that JSON strings may contain invalid UTF-8, + // which will be mangled as the Unicode replacement character, U+FFFD. + // This causes the encoder to break compliance with + // RFC 7493, section 2.1, and RFC 8259, section 8.1. + AllowInvalidUTF8 bool + + // preserveRawStrings specifies that WriteToken and WriteValue should not + // reformat any JSON string, but keep the formatting verbatim. + preserveRawStrings bool + + // canonicalizeNumbers specifies that WriteToken and WriteValue should + // reformat any JSON numbers according to RFC 8785, section 3.2.2.3. + canonicalizeNumbers bool + + // EscapeRune reports whether the provided character should be escaped + // as a hexadecimal Unicode codepoint (e.g., \ufffd). + // If nil, the shortest and simplest encoding will be used, + // which is also the formatting specified by RFC 8785, section 3.2.2.2. 
+ EscapeRune func(rune) bool + + // Indent (if non-empty) specifies that the encoder should emit multiline + // output where each element in a JSON object or array begins on a new, + // indented line beginning with the indent prefix followed by one or more + // copies of indent according to the indentation nesting. + // It may only be composed of space or tab characters. + Indent string + + // IndentPrefix is prepended to each line within a JSON object or array. + // The purpose of the indent prefix is to encode data that can more easily + // be embedded inside other formatted JSON data. + // It may only be composed of space or tab characters. + // It is ignored if Indent is empty. + IndentPrefix string +} + +// Encoder is a streaming encoder from raw JSON tokens and values. +// It is used to write a stream of top-level JSON values, +// each terminated with a newline character. +// +// WriteToken and WriteValue calls may be interleaved. +// For example, the following JSON value: +// +// {"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}} +// +// can be composed with the following calls (ignoring errors for brevity): +// +// e.WriteToken(ObjectStart) // { +// e.WriteToken(String("name")) // "name" +// e.WriteToken(String("value")) // "value" +// e.WriteValue(RawValue(`"array"`)) // "array" +// e.WriteToken(ArrayStart) // [ +// e.WriteToken(Null) // null +// e.WriteToken(False) // false +// e.WriteValue(RawValue("true")) // true +// e.WriteToken(Float(3.14159)) // 3.14159 +// e.WriteToken(ArrayEnd) // ] +// e.WriteValue(RawValue(`"object"`)) // "object" +// e.WriteValue(RawValue(`{"k":"v"}`)) // {"k":"v"} +// e.WriteToken(ObjectEnd) // } +// +// The above is one of many possible sequence of calls and +// may not represent the most sensible method to call for any given token/value. +// For example, it is probably more common to call WriteToken with a string +// for object names. +type Encoder struct { + state + encodeBuffer + options EncodeOptions + + seenPointers seenPointers // only used when marshaling +} + +// encodeBuffer is a buffer split into 2 segments: +// +// - buf[0:len(buf)] // written (but unflushed) portion of the buffer +// - buf[len(buf):cap(buf)] // unused portion of the buffer +type encodeBuffer struct { + buf []byte // may alias wr if it is a bytes.Buffer + + // baseOffset is added to len(buf) to obtain the absolute offset + // relative to the start of io.Writer stream. + baseOffset int64 + + wr io.Writer + + // maxValue is the approximate maximum RawValue size passed to WriteValue. + maxValue int + // unusedCache is the buffer returned by the UnusedBuffer method. + unusedCache []byte + // bufStats is statistics about buffer utilization. + // It is only used with pooled encoders in pools.go. + bufStats bufferStatistics +} + +// NewEncoder constructs a new streaming encoder writing to w. +func NewEncoder(w io.Writer) *Encoder { + return EncodeOptions{}.NewEncoder(w) +} + +// NewEncoder constructs a new streaming encoder writing to w +// configured with the provided options. +// It flushes the internal buffer when the buffer is sufficiently full or +// when a top-level value has been written. +// +// If w is a bytes.Buffer, then the encoder appends directly into the buffer +// without copying the contents from an intermediate buffer. 
+func (o EncodeOptions) NewEncoder(w io.Writer) *Encoder { + e := new(Encoder) + o.ResetEncoder(e, w) + return e +} + +// ResetEncoder resets an encoder such that it is writing afresh to w and +// configured with the provided options. +func (o EncodeOptions) ResetEncoder(e *Encoder, w io.Writer) { + if e == nil { + panic("json: invalid nil Encoder") + } + if w == nil { + panic("json: invalid nil io.Writer") + } + e.reset(nil, w, o) +} + +func (e *Encoder) reset(b []byte, w io.Writer, o EncodeOptions) { + if len(o.Indent) > 0 { + o.multiline = true + if s := trimLeftSpaceTab(o.IndentPrefix); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent prefix") + } + if s := trimLeftSpaceTab(o.Indent); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent") + } + } + e.state.reset() + e.encodeBuffer = encodeBuffer{buf: b, wr: w, bufStats: e.bufStats} + e.options = o + if bb, ok := w.(*bytes.Buffer); ok && bb != nil { + e.buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb + } +} + +// Reset resets an encoder such that it is writing afresh to w but +// keeps any pre-existing encoder options. +func (e *Encoder) Reset(w io.Writer) { + e.options.ResetEncoder(e, w) +} + +// needFlush determines whether to flush at this point. +func (e *Encoder) needFlush() bool { + // NOTE: This function is carefully written to be inlineable. + + // Avoid flushing if e.wr is nil since there is no underlying writer. + // Flush if less than 25% of the capacity remains. + // Flushing at some constant fraction ensures that the buffer stops growing + // so long as the largest Token or Value fits within that unused capacity. + return e.wr != nil && (e.tokens.depth() == 1 || len(e.buf) > 3*cap(e.buf)/4) +} + +// flush flushes the buffer to the underlying io.Writer. +// It may append a trailing newline after the top-level value. +func (e *Encoder) flush() error { + if e.wr == nil || e.avoidFlush() { + return nil + } + + // In streaming mode, always emit a newline after the top-level value. + if e.tokens.depth() == 1 && !e.options.omitTopLevelNewline { + e.buf = append(e.buf, '\n') + } + + // Inform objectNameStack that we are about to flush the buffer content. + e.names.copyQuotedBuffer(e.buf) + + // Specialize bytes.Buffer for better performance. + if bb, ok := e.wr.(*bytes.Buffer); ok { + // If e.buf already aliases the internal buffer of bb, + // then the Write call simply increments the internal offset, + // otherwise Write operates as expected. + // See https://go.dev/issue/42986. + n, _ := bb.Write(e.buf) // never fails unless bb is nil + e.baseOffset += int64(n) + + // If the internal buffer of bytes.Buffer is too small, + // append operations elsewhere in the Encoder may grow the buffer. + // This would be semantically correct, but hurts performance. + // As such, ensure 25% of the current length is always available + // to reduce the probability that other appends must allocate. + if avail := bb.Cap() - bb.Len(); avail < bb.Len()/4 { + bb.Grow(avail + 1) + } + + e.buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb + return nil + } + + // Flush the internal buffer to the underlying io.Writer. + n, err := e.wr.Write(e.buf) + e.baseOffset += int64(n) + if err != nil { + // In the event of an error, preserve the unflushed portion. + // Thus, write errors aren't fatal so long as the io.Writer + // maintains consistent state after errors. 
+ if n > 0 { + e.buf = e.buf[:copy(e.buf, e.buf[n:])] + } + return &ioError{action: "write", err: err} + } + e.buf = e.buf[:0] + + // Check whether to grow the buffer. + // Note that cap(e.buf) may already exceed maxBufferSize since + // an append elsewhere already grew it to store a large token. + const maxBufferSize = 4 << 10 + const growthSizeFactor = 2 // higher value is faster + const growthRateFactor = 2 // higher value is slower + // By default, grow if below the maximum buffer size. + grow := cap(e.buf) <= maxBufferSize/growthSizeFactor + // Growing can be expensive, so only grow + // if a sufficient number of bytes have been processed. + grow = grow && int64(cap(e.buf)) < e.previousOffsetEnd()/growthRateFactor + if grow { + e.buf = make([]byte, 0, cap(e.buf)*growthSizeFactor) + } + + return nil +} + +func (e *encodeBuffer) previousOffsetEnd() int64 { return e.baseOffset + int64(len(e.buf)) } +func (e *encodeBuffer) unflushedBuffer() []byte { return e.buf } + +// avoidFlush indicates whether to avoid flushing to ensure there is always +// enough in the buffer to unwrite the last object member if it were empty. +func (e *Encoder) avoidFlush() bool { + switch { + case e.tokens.last.length() == 0: + // Never flush after ObjectStart or ArrayStart since we don't know yet + // if the object or array will end up being empty. + return true + case e.tokens.last.needObjectValue(): + // Never flush before the object value since we don't know yet + // if the object value will end up being empty. + return true + case e.tokens.last.needObjectName() && len(e.buf) >= 2: + // Never flush after the object value if it does turn out to be empty. + switch string(e.buf[len(e.buf)-2:]) { + case `ll`, `""`, `{}`, `[]`: // last two bytes of every empty value + return true + } + } + return false +} + +// unwriteEmptyObjectMember unwrites the last object member if it is empty +// and reports whether it performed an unwrite operation. +func (e *Encoder) unwriteEmptyObjectMember(prevName *string) bool { + if last := e.tokens.last; !last.isObject() || !last.needObjectName() || last.length() == 0 { + panic("BUG: must be called on an object after writing a value") + } + + // The flushing logic is modified to never flush a trailing empty value. + // The encoder never writes trailing whitespace eagerly. + b := e.unflushedBuffer() + + // Detect whether the last value was empty. + var n int + if len(b) >= 3 { + switch string(b[len(b)-2:]) { + case "ll": // last two bytes of `null` + n = len(`null`) + case `""`: + // It is possible for a non-empty string to have `""` as a suffix + // if the second to the last quote was escaped. + if b[len(b)-3] == '\\' { + return false // e.g., `"\""` is not empty + } + n = len(`""`) + case `{}`: + n = len(`{}`) + case `[]`: + n = len(`[]`) + } + } + if n == 0 { + return false + } + + // Unwrite the value, whitespace, colon, name, whitespace, and comma. + b = b[:len(b)-n] + b = trimSuffixWhitespace(b) + b = trimSuffixByte(b, ':') + b = trimSuffixString(b) + b = trimSuffixWhitespace(b) + b = trimSuffixByte(b, ',') + e.buf = b // store back truncated unflushed buffer + + // Undo state changes. 
+ e.tokens.last.decrement() // for object member value + e.tokens.last.decrement() // for object member name + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + if prevName != nil { + e.names.copyQuotedBuffer(e.buf) // required by objectNameStack.replaceLastUnquotedName + e.names.replaceLastUnquotedName(*prevName) + } + } + return true +} + +// unwriteOnlyObjectMemberName unwrites the only object member name +// and returns the unquoted name. +func (e *Encoder) unwriteOnlyObjectMemberName() string { + if last := e.tokens.last; !last.isObject() || last.length() != 1 { + panic("BUG: must be called on an object after writing first name") + } + + // Unwrite the name and whitespace. + b := trimSuffixString(e.buf) + isVerbatim := bytes.IndexByte(e.buf[len(b):], '\\') < 0 + name := string(unescapeStringMayCopy(e.buf[len(b):], isVerbatim)) + e.buf = trimSuffixWhitespace(b) + + // Undo state changes. + e.tokens.last.decrement() + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + } + return name +} + +func trimSuffixWhitespace(b []byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + n := len(b) - 1 + for n >= 0 && (b[n] == ' ' || b[n] == '\t' || b[n] == '\r' || b[n] == '\n') { + n-- + } + return b[:n+1] +} + +func trimSuffixString(b []byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[len(b)-1] == '"' { + b = b[:len(b)-1] + } + for len(b) >= 2 && !(b[len(b)-1] == '"' && b[len(b)-2] != '\\') { + b = b[:len(b)-1] // trim all characters except an unescaped quote + } + if len(b) > 0 && b[len(b)-1] == '"' { + b = b[:len(b)-1] + } + return b +} + +func hasSuffixByte(b []byte, c byte) bool { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + return len(b) > 0 && b[len(b)-1] == c +} + +func trimSuffixByte(b []byte, c byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[len(b)-1] == c { + return b[:len(b)-1] + } + return b +} + +// WriteToken writes the next token and advances the internal write offset. +// +// The provided token kind must be consistent with the JSON grammar. +// For example, it is an error to provide a number when the encoder +// is expecting an object name (which is always a string), or +// to provide an end object delimiter when the encoder is finishing an array. +// If the provided token is invalid, then it reports a SyntacticError and +// the internal state remains unchanged. +func (e *Encoder) WriteToken(t Token) error { + k := t.Kind() + b := e.buf // use local variable to avoid mutating e in case of error + + // Append any delimiters or optional whitespace. + b = e.tokens.mayAppendDelim(b, k) + if e.options.multiline { + b = e.appendWhitespace(b, k) + } + + // Append the token to the output and to the state machine. + var err error + switch k { + case 'n': + b = append(b, "null"...) + err = e.tokens.appendLiteral() + case 'f': + b = append(b, "false"...) + err = e.tokens.appendLiteral() + case 't': + b = append(b, "true"...) 
+ err = e.tokens.appendLiteral() + case '"': + n0 := len(b) // offset before calling t.appendString + if b, err = t.appendString(b, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune); err != nil { + break + } + if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() { + if !e.tokens.last.isValidNamespace() { + err = errInvalidNamespace + break + } + if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) { + err = &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"} + break + } + e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds + } + err = e.tokens.appendString() + case '0': + if b, err = t.appendNumber(b, e.options.canonicalizeNumbers); err != nil { + break + } + err = e.tokens.appendNumber() + case '{': + b = append(b, '{') + if err = e.tokens.pushObject(); err != nil { + break + } + if !e.options.AllowDuplicateNames { + e.names.push() + e.namespaces.push() + } + case '}': + b = append(b, '}') + if err = e.tokens.popObject(); err != nil { + break + } + if !e.options.AllowDuplicateNames { + e.names.pop() + e.namespaces.pop() + } + case '[': + b = append(b, '[') + err = e.tokens.pushArray() + case ']': + b = append(b, ']') + err = e.tokens.popArray() + default: + return &SyntacticError{str: "invalid json.Token"} + } + if err != nil { + return err + } + + // Finish off the buffer and store it back into e. + e.buf = b + if e.needFlush() { + return e.flush() + } + return nil +} + +const ( + rawIntNumber = -1 + rawUintNumber = -2 +) + +// writeNumber is specialized version of WriteToken, but optimized for numbers. +// As a special-case, if bits is -1 or -2, it will treat v as +// the raw-encoded bits of an int64 or uint64, respectively. +// It is only called from arshal_default.go. +func (e *Encoder) writeNumber(v float64, bits int, quote bool) error { + b := e.buf // use local variable to avoid mutating e in case of error + + // Append any delimiters or optional whitespace. + b = e.tokens.mayAppendDelim(b, '0') + if e.options.multiline { + b = e.appendWhitespace(b, '0') + } + + if quote { + // Append the value to the output. + n0 := len(b) // offset before appending the number + b = append(b, '"') + switch bits { + case rawIntNumber: + b = strconv.AppendInt(b, int64(math.Float64bits(v)), 10) + case rawUintNumber: + b = strconv.AppendUint(b, uint64(math.Float64bits(v)), 10) + default: + b = appendNumber(b, v, bits) + } + b = append(b, '"') + + // Escape the string if necessary. + if e.options.EscapeRune != nil { + b2 := append(e.unusedCache, b[n0+len(`"`):len(b)-len(`"`)]...) + b, _ = appendString(b[:n0], string(b2), false, e.options.EscapeRune) + e.unusedCache = b2[:0] + } + + // Update the state machine. 
+ if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() { + if !e.tokens.last.isValidNamespace() { + return errInvalidNamespace + } + if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) { + return &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"} + } + e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds + } + if err := e.tokens.appendString(); err != nil { + return err + } + } else { + switch bits { + case rawIntNumber: + b = strconv.AppendInt(b, int64(math.Float64bits(v)), 10) + case rawUintNumber: + b = strconv.AppendUint(b, uint64(math.Float64bits(v)), 10) + default: + b = appendNumber(b, v, bits) + } + if err := e.tokens.appendNumber(); err != nil { + return err + } + } + + // Finish off the buffer and store it back into e. + e.buf = b + if e.needFlush() { + return e.flush() + } + return nil +} + +// WriteValue writes the next raw value and advances the internal write offset. +// The Encoder does not simply copy the provided value verbatim, but +// parses it to ensure that it is syntactically valid and reformats it +// according to how the Encoder is configured to format whitespace and strings. +// +// The provided value kind must be consistent with the JSON grammar +// (see examples on Encoder.WriteToken). If the provided value is invalid, +// then it reports a SyntacticError and the internal state remains unchanged. +func (e *Encoder) WriteValue(v RawValue) error { + e.maxValue |= len(v) // bitwise OR is a fast approximation of max + + k := v.Kind() + b := e.buf // use local variable to avoid mutating e in case of error + + // Append any delimiters or optional whitespace. + b = e.tokens.mayAppendDelim(b, k) + if e.options.multiline { + b = e.appendWhitespace(b, k) + } + + // Append the value the output. + var err error + v = v[consumeWhitespace(v):] + n0 := len(b) // offset before calling e.reformatValue + b, v, err = e.reformatValue(b, v, e.tokens.depth()) + if err != nil { + return err + } + v = v[consumeWhitespace(v):] + if len(v) > 0 { + return newInvalidCharacterError(v[0:], "after top-level value") + } + + // Append the kind to the state machine. + switch k { + case 'n', 'f', 't': + err = e.tokens.appendLiteral() + case '"': + if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() { + if !e.tokens.last.isValidNamespace() { + err = errInvalidNamespace + break + } + if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) { + err = &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"} + break + } + e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds + } + err = e.tokens.appendString() + case '0': + err = e.tokens.appendNumber() + case '{': + if err = e.tokens.pushObject(); err != nil { + break + } + if err = e.tokens.popObject(); err != nil { + panic("BUG: popObject should never fail immediately after pushObject: " + err.Error()) + } + case '[': + if err = e.tokens.pushArray(); err != nil { + break + } + if err = e.tokens.popArray(); err != nil { + panic("BUG: popArray should never fail immediately after pushArray: " + err.Error()) + } + } + if err != nil { + return err + } + + // Finish off the buffer and store it back into e. + e.buf = b + if e.needFlush() { + return e.flush() + } + return nil +} + +// appendWhitespace appends whitespace that immediately precedes the next token. 
+func (e *Encoder) appendWhitespace(b []byte, next Kind) []byte { + if e.tokens.needDelim(next) == ':' { + return append(b, ' ') + } else { + return e.appendIndent(b, e.tokens.needIndent(next)) + } +} + +// appendIndent appends the appropriate number of indentation characters +// for the current nested level, n. +func (e *Encoder) appendIndent(b []byte, n int) []byte { + if n == 0 { + return b + } + b = append(b, '\n') + b = append(b, e.options.IndentPrefix...) + for ; n > 1; n-- { + b = append(b, e.options.Indent...) + } + return b +} + +// reformatValue parses a JSON value from the start of src and +// appends it to the end of dst, reformatting whitespace and strings as needed. +// It returns the updated versions of dst and src. +func (e *Encoder) reformatValue(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) { + // TODO: Should this update valueFlags as input? + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + var n int + var err error + switch k := Kind(src[0]).normalize(); k { + case 'n': + if n = consumeNull(src); n == 0 { + n, err = consumeLiteral(src, "null") + } + case 'f': + if n = consumeFalse(src); n == 0 { + n, err = consumeLiteral(src, "false") + } + case 't': + if n = consumeTrue(src); n == 0 { + n, err = consumeLiteral(src, "true") + } + case '"': + if n := consumeSimpleString(src); n > 0 && e.options.EscapeRune == nil { + dst, src = append(dst, src[:n]...), src[n:] // copy simple strings verbatim + return dst, src, nil + } + return reformatString(dst, src, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune) + case '0': + if n := consumeSimpleNumber(src); n > 0 && !e.options.canonicalizeNumbers { + dst, src = append(dst, src[:n]...), src[n:] // copy simple numbers verbatim + return dst, src, nil + } + return reformatNumber(dst, src, e.options.canonicalizeNumbers) + case '{': + return e.reformatObject(dst, src, depth) + case '[': + return e.reformatArray(dst, src, depth) + default: + return dst, src, newInvalidCharacterError(src, "at start of value") + } + if err != nil { + return dst, src, err + } + dst, src = append(dst, src[:n]...), src[n:] + return dst, src, nil +} + +// reformatObject parses a JSON object from the start of src and +// appends it to the end of src, reformatting whitespace and strings as needed. +// It returns the updated versions of dst and src. +func (e *Encoder) reformatObject(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) { + // Append object start. + if src[0] != '{' { + panic("BUG: reformatObject must be called with a buffer that starts with '{'") + } + dst, src = append(dst, '{'), src[1:] + + // Append (possible) object end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] == '}' { + dst, src = append(dst, '}'), src[1:] + return dst, src, nil + } + + var err error + var names *objectNamespace + if !e.options.AllowDuplicateNames { + e.namespaces.push() + defer e.namespaces.pop() + names = e.namespaces.last() + } + depth++ + for { + // Append optional newline and indentation. + if e.options.multiline { + dst = e.appendIndent(dst, depth) + } + + // Append object name. 
+ src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + n0 := len(dst) // offset before calling reformatString + n := consumeSimpleString(src) + if n > 0 && e.options.EscapeRune == nil { + dst, src = append(dst, src[:n]...), src[n:] // copy simple strings verbatim + } else { + dst, src, err = reformatString(dst, src, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune) + } + if err != nil { + return dst, src, err + } + if !e.options.AllowDuplicateNames && !names.insertQuoted(dst[n0:], false) { + return dst, src, &SyntacticError{str: "duplicate name " + string(dst[n0:]) + " in object"} + } + + // Append colon. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] != ':' { + return dst, src, newInvalidCharacterError(src, "after object name (expecting ':')") + } + dst, src = append(dst, ':'), src[1:] + if e.options.multiline { + dst = append(dst, ' ') + } + + // Append object value. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + dst, src, err = e.reformatValue(dst, src, depth) + if err != nil { + return dst, src, err + } + + // Append comma or object end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + switch src[0] { + case ',': + dst, src = append(dst, ','), src[1:] + continue + case '}': + if e.options.multiline { + dst = e.appendIndent(dst, depth-1) + } + dst, src = append(dst, '}'), src[1:] + return dst, src, nil + default: + return dst, src, newInvalidCharacterError(src, "after object value (expecting ',' or '}')") + } + } +} + +// reformatArray parses a JSON array from the start of src and +// appends it to the end of dst, reformatting whitespace and strings as needed. +// It returns the updated versions of dst and src. +func (e *Encoder) reformatArray(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) { + // Append array start. + if src[0] != '[' { + panic("BUG: reformatArray must be called with a buffer that starts with '['") + } + dst, src = append(dst, '['), src[1:] + + // Append (possible) array end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] == ']' { + dst, src = append(dst, ']'), src[1:] + return dst, src, nil + } + + var err error + depth++ + for { + // Append optional newline and indentation. + if e.options.multiline { + dst = e.appendIndent(dst, depth) + } + + // Append array value. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + dst, src, err = e.reformatValue(dst, src, depth) + if err != nil { + return dst, src, err + } + + // Append comma or array end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + switch src[0] { + case ',': + dst, src = append(dst, ','), src[1:] + continue + case ']': + if e.options.multiline { + dst = e.appendIndent(dst, depth-1) + } + dst, src = append(dst, ']'), src[1:] + return dst, src, nil + default: + return dst, src, newInvalidCharacterError(src, "after array value (expecting ',' or ']')") + } + } +} + +// OutputOffset returns the current output byte offset. It gives the location +// of the next byte immediately after the most recently written token or value. +// The number of bytes actually written to the underlying io.Writer may be less +// than this offset due to internal buffering effects. 
+func (e *Encoder) OutputOffset() int64 { + return e.previousOffsetEnd() +} + +// UnusedBuffer returns a zero-length buffer with a possible non-zero capacity. +// This buffer is intended to be used to populate a RawValue +// being passed to an immediately succeeding WriteValue call. +// +// Example usage: +// +// b := d.UnusedBuffer() +// b = append(b, '"') +// b = appendString(b, v) // append the string formatting of v +// b = append(b, '"') +// ... := d.WriteValue(b) +// +// It is the user's responsibility to ensure that the value is valid JSON. +func (e *Encoder) UnusedBuffer() []byte { + // NOTE: We don't return e.buf[len(e.buf):cap(e.buf)] since WriteValue would + // need to take special care to avoid mangling the data while reformatting. + // WriteValue can't easily identify whether the input RawValue aliases e.buf + // without using unsafe.Pointer. Thus, we just return a different buffer. + // Should this ever alias e.buf, we need to consider how it operates with + // the specialized performance optimization for bytes.Buffer. + n := 1 << bits.Len(uint(e.maxValue|63)) // fast approximation for max length + if cap(e.unusedCache) < n { + e.unusedCache = make([]byte, 0, n) + } + return e.unusedCache +} + +// StackDepth returns the depth of the state machine for written JSON data. +// Each level on the stack represents a nested JSON object or array. +// It is incremented whenever an ObjectStart or ArrayStart token is encountered +// and decremented whenever an ObjectEnd or ArrayEnd token is encountered. +// The depth is zero-indexed, where zero represents the top-level JSON value. +func (e *Encoder) StackDepth() int { + // NOTE: Keep in sync with Decoder.StackDepth. + return e.tokens.depth() - 1 +} + +// StackIndex returns information about the specified stack level. +// It must be a number between 0 and StackDepth, inclusive. +// For each level, it reports the kind: +// +// - 0 for a level of zero, +// - '{' for a level representing a JSON object, and +// - '[' for a level representing a JSON array. +// +// It also reports the length of that JSON object or array. +// Each name and value in a JSON object is counted separately, +// so the effective number of members would be half the length. +// A complete JSON object must have an even length. +func (e *Encoder) StackIndex(i int) (Kind, int) { + // NOTE: Keep in sync with Decoder.StackIndex. + switch s := e.tokens.index(i); { + case i > 0 && s.isObject(): + return '{', s.length() + case i > 0 && s.isArray(): + return '[', s.length() + default: + return 0, s.length() + } +} + +// StackPointer returns a JSON Pointer (RFC 6901) to the most recently written value. +// Object names are only present if AllowDuplicateNames is false, otherwise +// object members are represented using their index within the object. +func (e *Encoder) StackPointer() string { + e.names.copyQuotedBuffer(e.buf) + return string(e.appendStackPointer(nil)) +} + +// appendString appends src to dst as a JSON string per RFC 7159, section 7. +// +// If validateUTF8 is specified, this rejects input that contains invalid UTF-8 +// otherwise invalid bytes are replaced with the Unicode replacement character. +// If escapeRune is provided, it specifies which runes to escape using +// hexadecimal sequences. If nil, the shortest representable form is used, +// which is also the canonical form for strings (RFC 8785, section 3.2.2.2). 
+// +// Note that this API allows full control over the formatting of strings +// except for whether a forward solidus '/' may be formatted as '\/' and +// the casing of hexadecimal Unicode escape sequences. +func appendString(dst []byte, src string, validateUTF8 bool, escapeRune func(rune) bool) ([]byte, error) { + appendEscapedASCII := func(dst []byte, c byte) []byte { + switch c { + case '"', '\\': + dst = append(dst, '\\', c) + case '\b': + dst = append(dst, "\\b"...) + case '\f': + dst = append(dst, "\\f"...) + case '\n': + dst = append(dst, "\\n"...) + case '\r': + dst = append(dst, "\\r"...) + case '\t': + dst = append(dst, "\\t"...) + default: + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(c)) + } + return dst + } + appendEscapedUnicode := func(dst []byte, r rune) []byte { + if r1, r2 := utf16.EncodeRune(r); r1 != '\ufffd' && r2 != '\ufffd' { + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r1)) + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r2)) + } else { + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r)) + } + return dst + } + + // Optimize for when escapeRune is nil. + if escapeRune == nil { + var i, n int + dst = append(dst, '"') + for uint(len(src)) > uint(n) { + // Handle single-byte ASCII. + if c := src[n]; c < utf8.RuneSelf { + n++ + if c < ' ' || c == '"' || c == '\\' { + dst = append(dst, src[i:n-1]...) + dst = appendEscapedASCII(dst, c) + i = n + } + continue + } + + // Handle multi-byte Unicode. + _, rn := utf8.DecodeRuneInString(src[n:]) + n += rn + if rn == 1 { // must be utf8.RuneError since we already checked for single-byte ASCII + dst = append(dst, src[i:n-rn]...) + if validateUTF8 { + return dst, &SyntacticError{str: "invalid UTF-8 within string"} + } + dst = append(dst, "\ufffd"...) + i = n + } + } + dst = append(dst, src[i:n]...) + dst = append(dst, '"') + return dst, nil + } + + // Slower implementation for when escapeRune is non-nil. + var i, n int + dst = append(dst, '"') + for uint(len(src)) > uint(n) { + switch r, rn := utf8.DecodeRuneInString(src[n:]); { + case r == utf8.RuneError && rn == 1: + dst = append(dst, src[i:n]...) + if validateUTF8 { + return dst, &SyntacticError{str: "invalid UTF-8 within string"} + } + if escapeRune('\ufffd') { + dst = append(dst, `\ufffd`...) + } else { + dst = append(dst, "\ufffd"...) + } + n += rn + i = n + case escapeRune(r): + dst = append(dst, src[i:n]...) + dst = appendEscapedUnicode(dst, r) + n += rn + i = n + case r < ' ' || r == '"' || r == '\\': + dst = append(dst, src[i:n]...) + dst = appendEscapedASCII(dst, byte(r)) + n += rn + i = n + default: + n += rn + } + } + dst = append(dst, src[i:n]...) + dst = append(dst, '"') + return dst, nil +} + +// reformatString consumes a JSON string from src and appends it to dst, +// reformatting it if necessary for the given escapeRune parameter. +// It returns the appended output and the remainder of the input. +func reformatString(dst, src []byte, validateUTF8, preserveRaw bool, escapeRune func(rune) bool) ([]byte, []byte, error) { + // TODO: Should this update valueFlags as input? + var flags valueFlags + n, err := consumeString(&flags, src, validateUTF8) + if err != nil { + return dst, src[n:], err + } + if preserveRaw || (escapeRune == nil && flags.isCanonical()) { + dst = append(dst, src[:n]...) // copy the string verbatim + return dst, src[n:], nil + } + + // TODO: Implement a direct, raw-to-raw reformat for strings. 
+ // If the escapeRune option would have resulted in no changes to the output, + // it would be faster to simply append src to dst without going through + // an intermediary representation in a separate buffer. + b, _ := unescapeString(make([]byte, 0, n), src[:n]) + dst, _ = appendString(dst, string(b), validateUTF8, escapeRune) + return dst, src[n:], nil +} + +// appendNumber appends src to dst as a JSON number per RFC 7159, section 6. +// It formats numbers similar to the ES6 number-to-string conversion. +// See https://go.dev/issue/14135. +// +// The output is identical to ECMA-262, 6th edition, section 7.1.12.1 and with +// RFC 8785, section 3.2.2.3 for 64-bit floating-point numbers except for -0, +// which is formatted as -0 instead of just 0. +// +// For 32-bit floating-point numbers, +// the output is a 32-bit equivalent of the algorithm. +// Note that ECMA-262 specifies no algorithm for 32-bit numbers. +func appendNumber(dst []byte, src float64, bits int) []byte { + if bits == 32 { + src = float64(float32(src)) + } + + abs := math.Abs(src) + fmt := byte('f') + if abs != 0 { + if bits == 64 && (float64(abs) < 1e-6 || float64(abs) >= 1e21) || + bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + dst = strconv.AppendFloat(dst, src, fmt, -1, bits) + if fmt == 'e' { + // Clean up e-09 to e-9. + n := len(dst) + if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { + dst[n-2] = dst[n-1] + dst = dst[:n-1] + } + } + return dst +} + +// reformatNumber consumes a JSON string from src and appends it to dst, +// canonicalizing it if specified. +// It returns the appended output and the remainder of the input. +func reformatNumber(dst, src []byte, canonicalize bool) ([]byte, []byte, error) { + n, err := consumeNumber(src) + if err != nil { + return dst, src[n:], err + } + if !canonicalize { + dst = append(dst, src[:n]...) // copy the number verbatim + return dst, src[n:], nil + } + + // Canonicalize the number per RFC 8785, section 3.2.2.3. + // As an optimization, we can copy integer numbers below 2⁵³ verbatim. + const maxExactIntegerDigits = 16 // len(strconv.AppendUint(nil, 1<<53, 10)) + if n < maxExactIntegerDigits && consumeSimpleNumber(src[:n]) == n { + dst = append(dst, src[:n]...) // copy the number verbatim + return dst, src[n:], nil + } + fv, _ := strconv.ParseFloat(string(src[:n]), 64) + switch { + case fv == 0: + fv = 0 // normalize negative zero as just zero + case math.IsInf(fv, +1): + fv = +math.MaxFloat64 + case math.IsInf(fv, -1): + fv = -math.MaxFloat64 + } + return appendNumber(dst, fv, 64), src[n:], nil +} + +// appendHexUint16 appends src to dst as a 4-byte hexadecimal number. +func appendHexUint16(dst []byte, src uint16) []byte { + dst = append(dst, "0000"[1+(bits.Len16(src)-1)/4:]...) + dst = strconv.AppendUint(dst, uint64(src), 16) + return dst +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go new file mode 100644 index 000000000..35be8601e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go @@ -0,0 +1,183 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package json + +import ( + "errors" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +const errorPrefix = "json: " + +// Error matches errors returned by this package according to errors.Is. +const Error = jsonError("json error") + +type jsonError string + +func (e jsonError) Error() string { + return string(e) +} +func (e jsonError) Is(target error) bool { + return e == target || target == Error +} + +type ioError struct { + action string // either "read" or "write" + err error +} + +func (e *ioError) Error() string { + return errorPrefix + e.action + " error: " + e.err.Error() +} +func (e *ioError) Unwrap() error { + return e.err +} +func (e *ioError) Is(target error) bool { + return e == target || target == Error || errors.Is(e.err, target) +} + +// SemanticError describes an error determining the meaning +// of JSON data as Go data or vice-versa. +// +// The contents of this error as produced by this package may change over time. +type SemanticError struct { + requireKeyedLiterals + nonComparable + + action string // either "marshal" or "unmarshal" + + // ByteOffset indicates that an error occurred after this byte offset. + ByteOffset int64 + // JSONPointer indicates that an error occurred within this JSON value + // as indicated using the JSON Pointer notation (see RFC 6901). + JSONPointer string + + // JSONKind is the JSON kind that could not be handled. + JSONKind Kind // may be zero if unknown + // GoType is the Go type that could not be handled. + GoType reflect.Type // may be nil if unknown + + // Err is the underlying error. + Err error // may be nil +} + +func (e *SemanticError) Error() string { + var sb strings.Builder + sb.WriteString(errorPrefix) + + // Hyrum-proof the error message by deliberately switching between + // two equivalent renderings of the same error message. + // The randomization is tied to the Hyrum-proofing already applied + // on map iteration in Go. + for phrase := range map[string]struct{}{"cannot": {}, "unable to": {}} { + sb.WriteString(phrase) + break // use whichever phrase we get in the first iteration + } + + // Format action. + var preposition string + switch e.action { + case "marshal": + sb.WriteString(" marshal") + preposition = " from" + case "unmarshal": + sb.WriteString(" unmarshal") + preposition = " into" + default: + sb.WriteString(" handle") + preposition = " with" + } + + // Format JSON kind. + var omitPreposition bool + switch e.JSONKind { + case 'n': + sb.WriteString(" JSON null") + case 'f', 't': + sb.WriteString(" JSON boolean") + case '"': + sb.WriteString(" JSON string") + case '0': + sb.WriteString(" JSON number") + case '{', '}': + sb.WriteString(" JSON object") + case '[', ']': + sb.WriteString(" JSON array") + default: + omitPreposition = true + } + + // Format Go type. + if e.GoType != nil { + if !omitPreposition { + sb.WriteString(preposition) + } + sb.WriteString(" Go value of type ") + sb.WriteString(e.GoType.String()) + } + + // Format where. + switch { + case e.JSONPointer != "": + sb.WriteString(" within JSON value at ") + sb.WriteString(strconv.Quote(e.JSONPointer)) + case e.ByteOffset > 0: + sb.WriteString(" after byte offset ") + sb.WriteString(strconv.FormatInt(e.ByteOffset, 10)) + } + + // Format underlying error. 
+ if e.Err != nil { + sb.WriteString(": ") + sb.WriteString(e.Err.Error()) + } + + return sb.String() +} +func (e *SemanticError) Is(target error) bool { + return e == target || target == Error || errors.Is(e.Err, target) +} +func (e *SemanticError) Unwrap() error { + return e.Err +} + +// SyntacticError is a description of a syntactic error that occurred when +// encoding or decoding JSON according to the grammar. +// +// The contents of this error as produced by this package may change over time. +type SyntacticError struct { + requireKeyedLiterals + nonComparable + + // ByteOffset indicates that an error occurred after this byte offset. + ByteOffset int64 + str string +} + +func (e *SyntacticError) Error() string { + return errorPrefix + e.str +} +func (e *SyntacticError) Is(target error) bool { + return e == target || target == Error +} +func (e *SyntacticError) withOffset(pos int64) error { + return &SyntacticError{ByteOffset: pos, str: e.str} +} + +func newInvalidCharacterError(prefix []byte, where string) *SyntacticError { + what := quoteRune(prefix) + return &SyntacticError{str: "invalid character " + what + " " + where} +} + +func quoteRune(b []byte) string { + r, n := utf8.DecodeRune(b) + if r == utf8.RuneError && n == 1 { + return `'\x` + strconv.FormatUint(uint64(b[0]), 16) + `'` + } + return strconv.QuoteRune(r) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go new file mode 100644 index 000000000..c0ee36166 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go @@ -0,0 +1,509 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +var errIgnoredField = errors.New("ignored field") + +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem() + +type structFields struct { + flattened []structField // listed in depth-first ordering + byActualName map[string]*structField + byFoldedName map[string][]*structField + inlinedFallback *structField +} + +type structField struct { + id int // unique numeric ID in breadth-first ordering + index []int // index into a struct according to reflect.Type.FieldByIndex + typ reflect.Type + fncs *arshaler + isZero func(addressableValue) bool + isEmpty func(addressableValue) bool + fieldOptions +} + +func makeStructFields(root reflect.Type) (structFields, *SemanticError) { + var fs structFields + fs.byActualName = make(map[string]*structField, root.NumField()) + fs.byFoldedName = make(map[string][]*structField, root.NumField()) + + // ambiguous is a sentinel value to indicate that at least two fields + // at the same depth have the same name, and thus cancel each other out. + // This follows the same rules as selecting a field on embedded structs + // where the shallowest field takes precedence. If more than one field + // exists at the shallowest depth, then the selection is illegal. + // See https://go.dev/ref/spec#Selectors. + ambiguous := new(structField) + + // Setup a queue for a breath-first search. 
+ var queueIndex int + type queueEntry struct { + typ reflect.Type + index []int + visitChildren bool // whether to recursively visit inlined field in this struct + } + queue := []queueEntry{{root, nil, true}} + seen := map[reflect.Type]bool{root: true} + + // Perform a breadth-first search over all reachable fields. + // This ensures that len(f.index) will be monotonically increasing. + for queueIndex < len(queue) { + qe := queue[queueIndex] + queueIndex++ + + t := qe.typ + inlinedFallbackIndex := -1 // index of last inlined fallback field in current struct + namesIndex := make(map[string]int) // index of each field with a given JSON object name in current struct + var hasAnyJSONTag bool // whether any Go struct field has a `json` tag + var hasAnyJSONField bool // whether any JSON serializable fields exist in current struct + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + _, hasTag := sf.Tag.Lookup("json") + hasAnyJSONTag = hasAnyJSONTag || hasTag + options, err := parseFieldOptions(sf) + if err != nil { + if err == errIgnoredField { + continue + } + return structFields{}, &SemanticError{GoType: t, Err: err} + } + hasAnyJSONField = true + f := structField{ + // Allocate a new slice (len=N+1) to hold both + // the parent index (len=N) and the current index (len=1). + // Do this to avoid clobbering the memory of the parent index. + index: append(append(make([]int, 0, len(qe.index)+1), qe.index...), i), + typ: sf.Type, + fieldOptions: options, + } + if sf.Anonymous && !f.hasName { + f.inline = true // implied by use of Go embedding without an explicit name + } + if f.inline || f.unknown { + // Handle an inlined field that serializes to/from + // zero or more JSON object members. + + if f.inline && f.unknown { + err := fmt.Errorf("Go struct field %s cannot have both `inline` and `unknown` specified", sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + switch f.fieldOptions { + case fieldOptions{name: f.name, quotedName: f.quotedName, inline: true}: + case fieldOptions{name: f.name, quotedName: f.quotedName, unknown: true}: + default: + err := fmt.Errorf("Go struct field %s cannot have any options other than `inline` or `unknown` specified", sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Unwrap one level of pointer indirection similar to how Go + // only allows embedding either T or *T, but not **T. + tf := f.typ + if tf.Kind() == reflect.Pointer && tf.Name() == "" { + tf = tf.Elem() + } + // Reject any types with custom serialization otherwise + // it becomes impossible to know what sub-fields to inline. + if which, _ := implementsWhich(tf, + jsonMarshalerV2Type, jsonMarshalerV1Type, textMarshalerType, + jsonUnmarshalerV2Type, jsonUnmarshalerV1Type, textUnmarshalerType, + ); which != nil && tf != rawValueType { + err := fmt.Errorf("inlined Go struct field %s of type %s must not implement JSON marshal or unmarshal methods", sf.Name, tf) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Handle an inlined field that serializes to/from + // a finite number of JSON object members backed by a Go struct. 
+ if tf.Kind() == reflect.Struct { + if f.unknown { + err := fmt.Errorf("inlined Go struct field %s of type %s with `unknown` tag must be a Go map of string key or a json.RawValue", sf.Name, tf) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + if qe.visitChildren { + queue = append(queue, queueEntry{tf, f.index, !seen[tf]}) + } + seen[tf] = true + continue + } + + // Handle an inlined field that serializes to/from any number of + // JSON object members back by a Go map or RawValue. + switch { + case tf == rawValueType: + f.fncs = nil // specially handled in arshal_inlined.go + case tf.Kind() == reflect.Map && tf.Key() == stringType: + f.fncs = lookupArshaler(tf.Elem()) + default: + err := fmt.Errorf("inlined Go struct field %s of type %s must be a Go struct, Go map of string key, or json.RawValue", sf.Name, tf) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Reject multiple inlined fallback fields within the same struct. + if inlinedFallbackIndex >= 0 { + err := fmt.Errorf("inlined Go struct fields %s and %s cannot both be a Go map or json.RawValue", t.Field(inlinedFallbackIndex).Name, sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + inlinedFallbackIndex = i + + // Multiple inlined fallback fields across different structs + // follow the same precedence rules as Go struct embedding. + if fs.inlinedFallback == nil { + fs.inlinedFallback = &f // store first occurrence at lowest depth + } else if len(fs.inlinedFallback.index) == len(f.index) { + fs.inlinedFallback = ambiguous // at least two occurrences at same depth + } + } else { + // Handle normal Go struct field that serializes to/from + // a single JSON object member. + + // Provide a function that uses a type's IsZero method. + switch { + case sf.Type.Kind() == reflect.Interface && sf.Type.Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return va.IsNil() || (va.Elem().Kind() == reflect.Pointer && va.Elem().IsNil()) || va.Interface().(isZeroer).IsZero() + } + case sf.Type.Kind() == reflect.Pointer && sf.Type.Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { + // Avoid panics calling IsZero on nil pointer. + return va.IsNil() || va.Interface().(isZeroer).IsZero() + } + case sf.Type.Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { return va.Interface().(isZeroer).IsZero() } + case reflect.PointerTo(sf.Type).Implements(isZeroerType): + f.isZero = func(va addressableValue) bool { return va.Addr().Interface().(isZeroer).IsZero() } + } + + // Provide a function that can determine whether the value would + // serialize as an empty JSON value. + switch sf.Type.Kind() { + case reflect.String, reflect.Map, reflect.Array, reflect.Slice: + f.isEmpty = func(va addressableValue) bool { return va.Len() == 0 } + case reflect.Pointer, reflect.Interface: + f.isEmpty = func(va addressableValue) bool { return va.IsNil() } + } + + f.id = len(fs.flattened) + f.fncs = lookupArshaler(sf.Type) + fs.flattened = append(fs.flattened, f) + + // Reject user-specified names with invalid UTF-8. + if !utf8.ValidString(f.name) { + err := fmt.Errorf("Go struct field %s has JSON object name %q with invalid UTF-8", sf.Name, f.name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + // Reject multiple fields with same name within the same struct. 
+ if j, ok := namesIndex[f.name]; ok { + err := fmt.Errorf("Go struct fields %s and %s conflict over JSON object name %q", t.Field(j).Name, sf.Name, f.name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + namesIndex[f.name] = i + + // Multiple fields of the same name across different structs + // follow the same precedence rules as Go struct embedding. + if f2 := fs.byActualName[f.name]; f2 == nil { + fs.byActualName[f.name] = &fs.flattened[len(fs.flattened)-1] // store first occurrence at lowest depth + } else if len(f2.index) == len(f.index) { + fs.byActualName[f.name] = ambiguous // at least two occurrences at same depth + } + } + } + + // NOTE: New users to the json package are occasionally surprised that + // unexported fields are ignored. This occurs by necessity due to our + // inability to directly introspect such fields with Go reflection + // without the use of unsafe. + // + // To reduce friction here, refuse to serialize any Go struct that + // has no JSON serializable fields, has at least one Go struct field, + // and does not have any `json` tags present. For example, + // errors returned by errors.New would fail to serialize. + isEmptyStruct := t.NumField() == 0 + if !isEmptyStruct && !hasAnyJSONTag && !hasAnyJSONField { + err := errors.New("Go struct has no exported fields") + return structFields{}, &SemanticError{GoType: t, Err: err} + } + } + + // Remove all fields that are duplicates. + // This may move elements forward to fill the holes from removed fields. + var n int + for _, f := range fs.flattened { + switch f2 := fs.byActualName[f.name]; { + case f2 == ambiguous: + delete(fs.byActualName, f.name) + case f2 == nil: + continue // may be nil due to previous delete + // TODO(https://go.dev/issue/45955): Use slices.Equal. + case reflect.DeepEqual(f.index, f2.index): + f.id = n + fs.flattened[n] = f + fs.byActualName[f.name] = &fs.flattened[n] // fix pointer to new location + n++ + } + } + fs.flattened = fs.flattened[:n] + if fs.inlinedFallback == ambiguous { + fs.inlinedFallback = nil + } + if len(fs.flattened) != len(fs.byActualName) { + panic(fmt.Sprintf("BUG: flattened list of fields mismatches fields mapped by name: %d != %d", len(fs.flattened), len(fs.byActualName))) + } + + // Sort the fields according to a depth-first ordering. + // This operation will cause pointers in byActualName to become incorrect, + // which we will correct in another loop shortly thereafter. + sort.Slice(fs.flattened, func(i, j int) bool { + si := fs.flattened[i].index + sj := fs.flattened[j].index + for len(si) > 0 && len(sj) > 0 { + switch { + case si[0] < sj[0]: + return true + case si[0] > sj[0]: + return false + default: + si = si[1:] + sj = sj[1:] + } + } + return len(si) < len(sj) + }) + + // Recompute the mapping of fields in the byActualName map. + // Pre-fold all names so that we can lookup folded names quickly. + for i, f := range fs.flattened { + foldedName := string(foldName([]byte(f.name))) + fs.byActualName[f.name] = &fs.flattened[i] + fs.byFoldedName[foldedName] = append(fs.byFoldedName[foldedName], &fs.flattened[i]) + } + for foldedName, fields := range fs.byFoldedName { + if len(fields) > 1 { + // The precedence order for conflicting nocase names + // is by breadth-first order, rather than depth-first order. 
+ sort.Slice(fields, func(i, j int) bool { + return fields[i].id < fields[j].id + }) + fs.byFoldedName[foldedName] = fields + } + } + + return fs, nil +} + +type fieldOptions struct { + name string + quotedName string // quoted name per RFC 8785, section 3.2.2.2. + hasName bool + nocase bool + inline bool + unknown bool + omitzero bool + omitempty bool + string bool + format string +} + +// parseFieldOptions parses the `json` tag in a Go struct field as +// a structured set of options configuring parameters such as +// the JSON member name and other features. +// As a special case, it returns errIgnoredField if the field is ignored. +func parseFieldOptions(sf reflect.StructField) (out fieldOptions, err error) { + tag, hasTag := sf.Tag.Lookup("json") + + // Check whether this field is explicitly ignored. + if tag == "-" { + return fieldOptions{}, errIgnoredField + } + + // Check whether this field is unexported. + if !sf.IsExported() { + // In contrast to v1, v2 no longer forwards exported fields from + // embedded fields of unexported types since Go reflection does not + // allow the same set of operations that are available in normal cases + // of purely exported fields. + // See https://go.dev/issue/21357 and https://go.dev/issue/24153. + if sf.Anonymous { + return fieldOptions{}, fmt.Errorf("embedded Go struct field %s of an unexported type must be explicitly ignored with a `json:\"-\"` tag", sf.Type.Name()) + } + // Tag options specified on an unexported field suggests user error. + if hasTag { + return fieldOptions{}, fmt.Errorf("unexported Go struct field %s cannot have non-ignored `json:%q` tag", sf.Name, tag) + } + return fieldOptions{}, errIgnoredField + } + + // Determine the JSON member name for this Go field. A user-specified name + // may be provided as either an identifier or a single-quoted string. + // The single-quoted string allows arbitrary characters in the name. + // See https://go.dev/issue/2718 and https://go.dev/issue/3546. + out.name = sf.Name // always starts with an uppercase character + if len(tag) > 0 && !strings.HasPrefix(tag, ",") { + // For better compatibility with v1, accept almost any unescaped name. + n := len(tag) - len(strings.TrimLeftFunc(tag, func(r rune) bool { + return !strings.ContainsRune(",\\'\"`", r) // reserve comma, backslash, and quotes + })) + opt := tag[:n] + if n == 0 { + // Allow a single quoted string for arbitrary names. + opt, n, err = consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: %v", sf.Name, err) + } + } + out.hasName = true + out.name = opt + tag = tag[n:] + } + b, _ := appendString(nil, out.name, false, nil) + out.quotedName = string(b) + + // Handle any additional tag options (if any). + var wasFormat bool + seenOpts := make(map[string]bool) + for len(tag) > 0 { + // Consume comma delimiter. + if tag[0] != ',' { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: invalid character %q before next option (expecting ',')", sf.Name, tag[0]) + } + tag = tag[len(","):] + if len(tag) == 0 { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: invalid trailing ',' character", sf.Name) + } + + // Consume and process the tag option. 
+ opt, n, err := consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: %v", sf.Name, err) + } + rawOpt := tag[:n] + tag = tag[n:] + switch { + case wasFormat: + return fieldOptions{}, fmt.Errorf("Go struct field %s has `format` tag option that was not specified last", sf.Name) + case strings.HasPrefix(rawOpt, "'") && strings.TrimFunc(opt, isLetterOrDigit) == "": + return fieldOptions{}, fmt.Errorf("Go struct field %s has unnecessarily quoted appearance of `%s` tag option; specify `%s` instead", sf.Name, rawOpt, opt) + } + switch opt { + case "nocase": + out.nocase = true + case "inline": + out.inline = true + case "unknown": + out.unknown = true + case "omitzero": + out.omitzero = true + case "omitempty": + out.omitempty = true + case "string": + out.string = true + case "format": + if !strings.HasPrefix(tag, ":") { + return fieldOptions{}, fmt.Errorf("Go struct field %s is missing value for `format` tag option", sf.Name) + } + tag = tag[len(":"):] + opt, n, err := consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed value for `format` tag option: %v", sf.Name, err) + } + tag = tag[n:] + out.format = opt + wasFormat = true + default: + // Reject keys that resemble one of the supported options. + // This catches invalid mutants such as "omitEmpty" or "omit_empty". + normOpt := strings.ReplaceAll(strings.ToLower(opt), "_", "") + switch normOpt { + case "nocase", "inline", "unknown", "omitzero", "omitempty", "string", "format": + return fieldOptions{}, fmt.Errorf("Go struct field %s has invalid appearance of `%s` tag option; specify `%s` instead", sf.Name, opt, normOpt) + } + + // NOTE: Everything else is ignored. This does not mean it is + // forward compatible to insert arbitrary tag options since + // a future version of this package may understand that tag. + } + + // Reject duplicates. + if seenOpts[opt] { + return fieldOptions{}, fmt.Errorf("Go struct field %s has duplicate appearance of `%s` tag option", sf.Name, rawOpt) + } + seenOpts[opt] = true + } + return out, nil +} + +func consumeTagOption(in string) (string, int, error) { + switch r, _ := utf8.DecodeRuneInString(in); { + // Option as a Go identifier. + case r == '_' || unicode.IsLetter(r): + n := len(in) - len(strings.TrimLeftFunc(in, isLetterOrDigit)) + return in[:n], n, nil + // Option as a single-quoted string. + case r == '\'': + // The grammar is nearly identical to a double-quoted Go string literal, + // but uses single quotes as the terminators. The reason for a custom + // grammar is because both backtick and double quotes cannot be used + // verbatim in a struct tag. + // + // Convert a single-quoted string to a double-quote string and rely on + // strconv.Unquote to handle the rest. + var inEscape bool + b := []byte{'"'} + n := len(`'`) + for len(in) > n { + r, rn := utf8.DecodeRuneInString(in[n:]) + switch { + case inEscape: + if r == '\'' { + b = b[:len(b)-1] // remove escape character: `\'` => `'` + } + inEscape = false + case r == '\\': + inEscape = true + case r == '"': + b = append(b, '\\') // insert escape character: `"` => `\"` + case r == '\'': + b = append(b, '"') + n += len(`'`) + out, err := strconv.Unquote(string(b)) + if err != nil { + return "", 0, fmt.Errorf("invalid single-quoted string: %s", in[:n]) + } + return out, n, nil + } + b = append(b, in[n:][:rn]...) 
+ n += rn + } + if n > 10 { + n = 10 // limit the amount of context printed in the error + } + return "", 0, fmt.Errorf("single-quoted string not terminated: %s...", in[:n]) + case len(in) == 0: + return "", 0, io.ErrUnexpectedEOF + default: + return "", 0, fmt.Errorf("invalid character %q at start of option (expecting Unicode letter or single quote)", r) + } +} + +func isLetterOrDigit(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go new file mode 100644 index 000000000..9ab735814 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go @@ -0,0 +1,56 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "unicode" + "unicode/utf8" +) + +// foldName returns a folded string such that foldName(x) == foldName(y) +// is similar to strings.EqualFold(x, y), but ignores underscore and dashes. +// This allows foldName to match common naming conventions. +func foldName(in []byte) []byte { + // This is inlinable to take advantage of "function outlining". + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ + var arr [32]byte // large enough for most JSON names + return appendFoldedName(arr[:0], in) +} +func appendFoldedName(out, in []byte) []byte { + for i := 0; i < len(in); { + // Handle single-byte ASCII. + if c := in[i]; c < utf8.RuneSelf { + if c != '_' && c != '-' { + if 'a' <= c && c <= 'z' { + c -= 'a' - 'A' + } + out = append(out, c) + } + i++ + continue + } + // Handle multi-byte Unicode. + r, n := utf8.DecodeRune(in[i:]) + out = utf8.AppendRune(out, foldRune(r)) + i += n + } + return out +} + +// foldRune is a variation on unicode.SimpleFold that returns the same rune +// for all runes in the same fold set. +// +// Invariant: +// +// foldRune(x) == foldRune(y) ⇔ strings.EqualFold(string(x), string(y)) +func foldRune(r rune) rune { + for { + r2 := unicode.SimpleFold(r) + if r2 <= r { + return r2 // smallest character in the fold set + } + r = r2 + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go new file mode 100644 index 000000000..700a56db0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go @@ -0,0 +1,86 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding/binary" + "math/bits" +) + +// stringCache is a cache for strings converted from a []byte. +type stringCache [256]string // 256*unsafe.Sizeof(string("")) => 4KiB + +// make returns the string form of b. +// It returns a pre-allocated string from c if present, otherwise +// it allocates a new string, inserts it into the cache, and returns it. +func (c *stringCache) make(b []byte) string { + const ( + minCachedLen = 2 // single byte strings are already interned by the runtime + maxCachedLen = 256 // large enough for UUIDs, IPv6 addresses, SHA-256 checksums, etc. 
+ ) + if c == nil || len(b) < minCachedLen || len(b) > maxCachedLen { + return string(b) + } + + // Compute a hash from the fixed-width prefix and suffix of the string. + // This ensures hashing a string is a constant time operation. + var h uint32 + switch { + case len(b) >= 8: + lo := binary.LittleEndian.Uint64(b[:8]) + hi := binary.LittleEndian.Uint64(b[len(b)-8:]) + h = hash64(uint32(lo), uint32(lo>>32)) ^ hash64(uint32(hi), uint32(hi>>32)) + case len(b) >= 4: + lo := binary.LittleEndian.Uint32(b[:4]) + hi := binary.LittleEndian.Uint32(b[len(b)-4:]) + h = hash64(lo, hi) + case len(b) >= 2: + lo := binary.LittleEndian.Uint16(b[:2]) + hi := binary.LittleEndian.Uint16(b[len(b)-2:]) + h = hash64(uint32(lo), uint32(hi)) + } + + // Check the cache for the string. + i := h % uint32(len(*c)) + if s := (*c)[i]; s == string(b) { + return s + } + s := string(b) + (*c)[i] = s + return s +} + +// hash64 returns the hash of two uint32s as a single uint32. +func hash64(lo, hi uint32) uint32 { + // If avalanche=true, this is identical to XXH32 hash on a 8B string: + // var b [8]byte + // binary.LittleEndian.PutUint32(b[:4], lo) + // binary.LittleEndian.PutUint32(b[4:], hi) + // return xxhash.Sum32(b[:]) + const ( + prime1 = 0x9e3779b1 + prime2 = 0x85ebca77 + prime3 = 0xc2b2ae3d + prime4 = 0x27d4eb2f + prime5 = 0x165667b1 + ) + h := prime5 + uint32(8) + h += lo * prime3 + h = bits.RotateLeft32(h, 17) * prime4 + h += hi * prime3 + h = bits.RotateLeft32(h, 17) * prime4 + // Skip final mix (avalanche) step of XXH32 for performance reasons. + // Empirical testing shows that the improvements in unbiased distribution + // does not outweigh the extra cost in computational complexity. + const avalanche = false + if avalanche { + h ^= h >> 15 + h *= prime2 + h ^= h >> 13 + h *= prime3 + h ^= h >> 16 + } + return h +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go new file mode 100644 index 000000000..60e93270f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go @@ -0,0 +1,182 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "io" + "math/bits" + "sort" + "sync" +) + +// TODO(https://go.dev/issue/47657): Use sync.PoolOf. + +var ( + // This owns the internal buffer since there is no io.Writer to output to. + // Since the buffer can get arbitrarily large in normal usage, + // there is statistical tracking logic to determine whether to recycle + // the internal buffer or not based on a history of utilization. + bufferedEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }} + + // This owns the internal buffer, but it is only used to temporarily store + // buffered JSON before flushing it to the underlying io.Writer. + // In a sufficiently efficient streaming mode, we do not expect the buffer + // to grow arbitrarily large. Thus, we avoid recycling large buffers. + streamingEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }} + + // This does not own the internal buffer since + // it is taken directly from the provided bytes.Buffer. + bytesBufferEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }} +) + +// bufferStatistics is statistics to track buffer utilization. 
+// It is used to determine whether to recycle a buffer or not +// to avoid https://go.dev/issue/23199. +type bufferStatistics struct { + strikes int // number of times the buffer was under-utilized + prevLen int // length of previous buffer +} + +func getBufferedEncoder(o EncodeOptions) *Encoder { + e := bufferedEncoderPool.Get().(*Encoder) + if e.buf == nil { + // Round up to nearest 2ⁿ to make best use of malloc size classes. + // See runtime/sizeclasses.go on Go1.15. + // Logical OR with 63 to ensure 64 as the minimum buffer size. + n := 1 << bits.Len(uint(e.bufStats.prevLen|63)) + e.buf = make([]byte, 0, n) + } + e.reset(e.buf[:0], nil, o) + return e +} +func putBufferedEncoder(e *Encoder) { + // Recycle large buffers only if sufficiently utilized. + // If a buffer is under-utilized enough times sequentially, + // then it is discarded, ensuring that a single large buffer + // won't be kept alive by a continuous stream of small usages. + // + // The worst case utilization is computed as: + // MIN_UTILIZATION_THRESHOLD / (1 + MAX_NUM_STRIKES) + // + // For the constants chosen below, this is (25%)/(1+4) ⇒ 5%. + // This may seem low, but it ensures a lower bound on + // the absolute worst-case utilization. Without this check, + // this would be theoretically 0%, which is infinitely worse. + // + // See https://go.dev/issue/27735. + switch { + case cap(e.buf) <= 4<<10: // always recycle buffers smaller than 4KiB + e.bufStats.strikes = 0 + case cap(e.buf)/4 <= len(e.buf): // at least 25% utilization + e.bufStats.strikes = 0 + case e.bufStats.strikes < 4: // at most 4 strikes + e.bufStats.strikes++ + default: // discard the buffer; too large and too often under-utilized + e.bufStats.strikes = 0 + e.bufStats.prevLen = len(e.buf) // heuristic for size to allocate next time + e.buf = nil + } + bufferedEncoderPool.Put(e) +} + +func getStreamingEncoder(w io.Writer, o EncodeOptions) *Encoder { + if _, ok := w.(*bytes.Buffer); ok { + e := bytesBufferEncoderPool.Get().(*Encoder) + e.reset(nil, w, o) // buffer taken from bytes.Buffer + return e + } else { + e := streamingEncoderPool.Get().(*Encoder) + e.reset(e.buf[:0], w, o) // preserve existing buffer + return e + } +} +func putStreamingEncoder(e *Encoder) { + if _, ok := e.wr.(*bytes.Buffer); ok { + bytesBufferEncoderPool.Put(e) + } else { + if cap(e.buf) > 64<<10 { + e.buf = nil // avoid pinning arbitrarily large amounts of memory + } + streamingEncoderPool.Put(e) + } +} + +var ( + // This does not own the internal buffer since it is externally provided. + bufferedDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }} + + // This owns the internal buffer, but it is only used to temporarily store + // buffered JSON fetched from the underlying io.Reader. + // In a sufficiently efficient streaming mode, we do not expect the buffer + // to grow arbitrarily large. Thus, we avoid recycling large buffers. + streamingDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }} + + // This does not own the internal buffer since + // it is taken directly from the provided bytes.Buffer. 
+ bytesBufferDecoderPool = bufferedDecoderPool +) + +func getBufferedDecoder(b []byte, o DecodeOptions) *Decoder { + d := bufferedDecoderPool.Get().(*Decoder) + d.reset(b, nil, o) + return d +} +func putBufferedDecoder(d *Decoder) { + bufferedDecoderPool.Put(d) +} + +func getStreamingDecoder(r io.Reader, o DecodeOptions) *Decoder { + if _, ok := r.(*bytes.Buffer); ok { + d := bytesBufferDecoderPool.Get().(*Decoder) + d.reset(nil, r, o) // buffer taken from bytes.Buffer + return d + } else { + d := streamingDecoderPool.Get().(*Decoder) + d.reset(d.buf[:0], r, o) // preserve existing buffer + return d + } +} +func putStreamingDecoder(d *Decoder) { + if _, ok := d.rd.(*bytes.Buffer); ok { + bytesBufferDecoderPool.Put(d) + } else { + if cap(d.buf) > 64<<10 { + d.buf = nil // avoid pinning arbitrarily large amounts of memory + } + streamingDecoderPool.Put(d) + } +} + +var stringsPools = &sync.Pool{New: func() any { return new(stringSlice) }} + +type stringSlice []string + +// getStrings returns a non-nil pointer to a slice with length n. +func getStrings(n int) *stringSlice { + s := stringsPools.Get().(*stringSlice) + if cap(*s) < n { + *s = make([]string, n) + } + *s = (*s)[:n] + return s +} + +func putStrings(s *stringSlice) { + if cap(*s) > 1<<10 { + *s = nil // avoid pinning arbitrarily large amounts of memory + } + stringsPools.Put(s) +} + +// Sort sorts the string slice according to RFC 8785, section 3.2.3. +func (ss *stringSlice) Sort() { + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Sort(ss) +} + +func (ss *stringSlice) Len() int { return len(*ss) } +func (ss *stringSlice) Less(i, j int) bool { return lessUTF16((*ss)[i], (*ss)[j]) } +func (ss *stringSlice) Swap(i, j int) { (*ss)[i], (*ss)[j] = (*ss)[j], (*ss)[i] } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go new file mode 100644 index 000000000..ee14c753f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go @@ -0,0 +1,747 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "strconv" +) + +var ( + errMissingName = &SyntacticError{str: "missing string for object name"} + errMissingColon = &SyntacticError{str: "missing character ':' after object name"} + errMissingValue = &SyntacticError{str: "missing value after object name"} + errMissingComma = &SyntacticError{str: "missing character ',' after object or array value"} + errMismatchDelim = &SyntacticError{str: "mismatching structural token for object or array"} +) + +const errInvalidNamespace = jsonError("object namespace is in an invalid state") + +type state struct { + // tokens validates whether the next token kind is valid. + tokens stateMachine + + // names is a stack of object names. + // Not used if AllowDuplicateNames is true. + names objectNameStack + + // namespaces is a stack of object namespaces. + // For performance reasons, Encoder or Decoder may not update this + // if Marshal or Unmarshal is able to track names in a more efficient way. + // See makeMapArshaler and makeStructArshaler. + // Not used if AllowDuplicateNames is true. 
+ namespaces objectNamespaceStack +} + +func (s *state) reset() { + s.tokens.reset() + s.names.reset() + s.namespaces.reset() +} + +// appendStackPointer appends a JSON Pointer (RFC 6901) to the current value. +// The returned pointer is only accurate if s.names is populated, +// otherwise it uses the numeric index as the object member name. +// +// Invariant: Must call s.names.copyQuotedBuffer beforehand. +func (s state) appendStackPointer(b []byte) []byte { + var objectDepth int + for i := 1; i < s.tokens.depth(); i++ { + e := s.tokens.index(i) + if e.length() == 0 { + break // empty object or array + } + b = append(b, '/') + switch { + case e.isObject(): + if objectDepth < s.names.length() { + for _, c := range s.names.getUnquoted(objectDepth) { + // Per RFC 6901, section 3, escape '~' and '/' characters. + switch c { + case '~': + b = append(b, "~0"...) + case '/': + b = append(b, "~1"...) + default: + b = append(b, c) + } + } + } else { + // Since the names stack is unpopulated, the name is unknown. + // As a best-effort replacement, use the numeric member index. + // While inaccurate, it produces a syntactically valid pointer. + b = strconv.AppendUint(b, uint64((e.length()-1)/2), 10) + } + objectDepth++ + case e.isArray(): + b = strconv.AppendUint(b, uint64(e.length()-1), 10) + } + } + return b +} + +// stateMachine is a push-down automaton that validates whether +// a sequence of tokens is valid or not according to the JSON grammar. +// It is useful for both encoding and decoding. +// +// It is a stack where each entry represents a nested JSON object or array. +// The stack has a minimum depth of 1 where the first level is a +// virtual JSON array to handle a stream of top-level JSON values. +// The top-level virtual JSON array is special in that it doesn't require commas +// between each JSON value. +// +// For performance, most methods are carefully written to be inlineable. +// The zero value is a valid state machine ready for use. +type stateMachine struct { + stack []stateEntry + last stateEntry +} + +// reset resets the state machine. +// The machine always starts with a minimum depth of 1. +func (m *stateMachine) reset() { + m.stack = m.stack[:0] + if cap(m.stack) > 1<<10 { + m.stack = nil + } + m.last = stateTypeArray +} + +// depth is the current nested depth of JSON objects and arrays. +// It is one-indexed (i.e., top-level values have a depth of 1). +func (m stateMachine) depth() int { + return len(m.stack) + 1 +} + +// index returns a reference to the ith entry. +// It is only valid until the next push method call. +func (m *stateMachine) index(i int) *stateEntry { + if i == len(m.stack) { + return &m.last + } + return &m.stack[i] +} + +// depthLength reports the current nested depth and +// the length of the last JSON object or array. +func (m stateMachine) depthLength() (int, int) { + return m.depth(), m.last.length() +} + +// appendLiteral appends a JSON literal as the next token in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) appendLiteral() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + return nil + } +} + +// appendString appends a JSON string as the next token in the sequence. +// If an error is returned, the state is not mutated. 
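Aside (illustrative, not part of the vendored file; the helper name is hypothetical and the standard strings package is assumed): the RFC 6901 escaping applied by appendStackPointer above can be restated on its own. A member named "a/b~c" whose current value is array index 2 yields the pointer "/a~1b~0c/2".

func escapePointerToken(s string) string {
	// Per RFC 6901, '~' must be written as "~0" and '/' as "~1".
	return strings.NewReplacer("~", "~0", "/", "~1").Replace(s)
}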
+func (m *stateMachine) appendString() error { + switch { + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + return nil + } +} + +// appendNumber appends a JSON number as the next token in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) appendNumber() error { + return m.appendLiteral() +} + +// pushObject appends a JSON start object token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) pushObject() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + m.stack = append(m.stack, m.last) + m.last = stateTypeObject + return nil + } +} + +// popObject appends a JSON end object token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) popObject() error { + switch { + case !m.last.isObject(): + return errMismatchDelim + case m.last.needObjectValue(): + return errMissingValue + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last = m.stack[len(m.stack)-1] + m.stack = m.stack[:len(m.stack)-1] + return nil + } +} + +// pushArray appends a JSON start array token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) pushArray() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + m.stack = append(m.stack, m.last) + m.last = stateTypeArray + return nil + } +} + +// popArray appends a JSON end array token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) popArray() error { + switch { + case !m.last.isArray() || len(m.stack) == 0: // forbid popping top-level virtual JSON array + return errMismatchDelim + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last = m.stack[len(m.stack)-1] + m.stack = m.stack[:len(m.stack)-1] + return nil + } +} + +// needIndent reports whether indent whitespace should be injected. +// A zero value means that no whitespace should be injected. +// A positive value means '\n', indentPrefix, and (n-1) copies of indentBody +// should be appended to the output immediately before the next token. +func (m stateMachine) needIndent(next Kind) (n int) { + willEnd := next == '}' || next == ']' + switch { + case m.depth() == 1: + return 0 // top-level values are never indented + case m.last.length() == 0 && willEnd: + return 0 // an empty object or array is never indented + case m.last.length() == 0 || m.last.needImplicitComma(next): + return m.depth() + case willEnd: + return m.depth() - 1 + default: + return 0 + } +} + +// mayAppendDelim appends a colon or comma that may precede the next token. +func (m stateMachine) mayAppendDelim(b []byte, next Kind) []byte { + switch { + case m.last.needImplicitColon(): + return append(b, ':') + case m.last.needImplicitComma(next) && len(m.stack) != 0: // comma not needed for top-level values + return append(b, ',') + default: + return b + } +} + +// needDelim reports whether a colon or comma token should be implicitly emitted +// before the next token of the specified kind. +// A zero value means no delimiter should be emitted. 
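Aside (a simplified restatement, not part of the vendored file; the helper name is hypothetical): the implicit-delimiter rules above boil down to a small decision per token.

// needDelimSketch: inside an object, an odd element count means a name was just
// read, so ':' must follow; otherwise any non-empty, non-top-level container
// needs ',' before the next token unless that token closes the container.
func needDelimSketch(isObject, topLevel bool, count int, next byte) byte {
	if isObject && count%2 == 1 {
		return ':'
	}
	if count > 0 && !topLevel && next != '}' && next != ']' {
		return ','
	}
	return 0
}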
+func (m stateMachine) needDelim(next Kind) (delim byte) { + switch { + case m.last.needImplicitColon(): + return ':' + case m.last.needImplicitComma(next) && len(m.stack) != 0: // comma not needed for top-level values + return ',' + default: + return 0 + } +} + +// checkDelim reports whether the specified delimiter should be there given +// the kind of the next token that appears immediately afterwards. +func (m stateMachine) checkDelim(delim byte, next Kind) error { + switch needDelim := m.needDelim(next); { + case needDelim == delim: + return nil + case needDelim == ':': + return errMissingColon + case needDelim == ',': + return errMissingComma + default: + return newInvalidCharacterError([]byte{delim}, "before next token") + } +} + +// invalidateDisabledNamespaces marks all disabled namespaces as invalid. +// +// For efficiency, Marshal and Unmarshal may disable namespaces since there are +// more efficient ways to track duplicate names. However, if an error occurs, +// the namespaces in Encoder or Decoder will be left in an inconsistent state. +// Mark the namespaces as invalid so that future method calls on +// Encoder or Decoder will return an error. +func (m *stateMachine) invalidateDisabledNamespaces() { + for i := 0; i < m.depth(); i++ { + e := m.index(i) + if !e.isActiveNamespace() { + e.invalidateNamespace() + } + } +} + +// stateEntry encodes several artifacts within a single unsigned integer: +// - whether this represents a JSON object or array, +// - whether this object should check for duplicate names, and +// - how many elements are in this JSON object or array. +type stateEntry uint64 + +const ( + // The type mask (1 bit) records whether this is a JSON object or array. + stateTypeMask stateEntry = 0x8000_0000_0000_0000 + stateTypeObject stateEntry = 0x8000_0000_0000_0000 + stateTypeArray stateEntry = 0x0000_0000_0000_0000 + + // The name check mask (2 bit) records whether to update + // the namespaces for the current JSON object and + // whether the namespace is valid. + stateNamespaceMask stateEntry = 0x6000_0000_0000_0000 + stateDisableNamespace stateEntry = 0x4000_0000_0000_0000 + stateInvalidNamespace stateEntry = 0x2000_0000_0000_0000 + + // The count mask (61 bits) records the number of elements. + stateCountMask stateEntry = 0x1fff_ffff_ffff_ffff + stateCountLSBMask stateEntry = 0x0000_0000_0000_0001 + stateCountOdd stateEntry = 0x0000_0000_0000_0001 + stateCountEven stateEntry = 0x0000_0000_0000_0000 +) + +// length reports the number of elements in the JSON object or array. +// Each name and value in an object entry is treated as a separate element. +func (e stateEntry) length() int { + return int(e & stateCountMask) +} + +// isObject reports whether this is a JSON object. +func (e stateEntry) isObject() bool { + return e&stateTypeMask == stateTypeObject +} + +// isArray reports whether this is a JSON array. +func (e stateEntry) isArray() bool { + return e&stateTypeMask == stateTypeArray +} + +// needObjectName reports whether the next token must be a JSON string, +// which is necessary for JSON object names. +func (e stateEntry) needObjectName() bool { + return e&(stateTypeMask|stateCountLSBMask) == stateTypeObject|stateCountEven +} + +// needImplicitColon reports whether an colon should occur next, +// which always occurs after JSON object names. +func (e stateEntry) needImplicitColon() bool { + return e.needObjectValue() +} + +// needObjectValue reports whether the next token must be a JSON value, +// which is necessary after every JSON object name. 
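Aside (illustrative, not part of the vendored file; the variable name is hypothetical): with the packing above, the grammar questions reduce to bit tests on a single word.

var (
	pairEntry stateEntry = stateTypeObject | 2 // an object that has read one name/value pair
	_                    = pairEntry.length()         // 2: each name and each value counts separately
	_                    = pairEntry.needObjectName() // true: even count, so a name (or '}') comes next
)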
+func (e stateEntry) needObjectValue() bool {
+	return e&(stateTypeMask|stateCountLSBMask) == stateTypeObject|stateCountOdd
+}
+
+// needImplicitComma reports whether a comma should occur next,
+// which always occurs after a value in a JSON object or array
+// before the next value (or name).
+func (e stateEntry) needImplicitComma(next Kind) bool {
+	return !e.needObjectValue() && e.length() > 0 && next != '}' && next != ']'
+}
+
+// increment increments the number of elements for the current object or array.
+// This assumes that overflow won't practically be an issue since
+// 1<<61 is inconceivably large.
+func (e *stateEntry) increment() {
+	(*e)++
+}
+
+// decrement decrements the number of elements for the current object or array.
+// It is the caller's responsibility to ensure that e.length() > 0.
+func (e *stateEntry) decrement() {
+	(*e)--
+}
+
+// disableNamespace disables the JSON object namespace such that the
+// Encoder or Decoder no longer updates the namespace.
+func (e *stateEntry) disableNamespace() {
+	*e |= stateDisableNamespace
+}
+
+// isActiveNamespace reports whether the JSON object namespace is actively
+// being updated and used for duplicate name checks.
+func (e stateEntry) isActiveNamespace() bool {
+	return e&(stateDisableNamespace) == 0
+}
+
+// invalidateNamespace marks the JSON object namespace as being invalid.
+func (e *stateEntry) invalidateNamespace() {
+	*e |= stateInvalidNamespace
+}
+
+// isValidNamespace reports whether the JSON object namespace is valid.
+func (e stateEntry) isValidNamespace() bool {
+	return e&(stateInvalidNamespace) == 0
+}
+
+// objectNameStack is a stack of names when descending into a JSON object.
+// In contrast to objectNamespaceStack, this only has to remember a single name
+// per JSON object.
+//
+// This data structure may contain offsets to encodeBuffer or decodeBuffer.
+// It violates clean abstraction of layers, but is significantly more efficient.
+// This ensures that popping and pushing in the common case is a trivial
+// push/pop of an offset integer.
+//
+// The zero value is an empty names stack ready for use.
+type objectNameStack struct {
+	// offsets is a stack of offsets for each name.
+	// A non-negative offset is the ending offset into the local names buffer.
+	// A negative offset is the bit-wise inverse of a starting offset into
+	// a remote buffer (e.g., encodeBuffer or decodeBuffer).
+	// A math.MinInt offset at the end implies that the last object is empty.
+	// Invariant: Positive offsets always occur before negative offsets.
+	offsets []int
+	// unquotedNames is a back-to-back concatenation of names.
+	unquotedNames []byte
+}
+
+func (ns *objectNameStack) reset() {
+	ns.offsets = ns.offsets[:0]
+	ns.unquotedNames = ns.unquotedNames[:0]
+	if cap(ns.offsets) > 1<<6 {
+		ns.offsets = nil // avoid pinning arbitrarily large amounts of memory
+	}
+	if cap(ns.unquotedNames) > 1<<10 {
+		ns.unquotedNames = nil // avoid pinning arbitrarily large amounts of memory
+	}
+}
+
+func (ns *objectNameStack) length() int {
+	return len(ns.offsets)
+}
+
+// getUnquoted retrieves the ith unquoted name in the namespace.
+// It returns an empty string if the last object is empty.
+//
+// Invariant: Must call copyQuotedBuffer beforehand.
+func (ns *objectNameStack) getUnquoted(i int) []byte {
+	ns.ensureCopiedBuffer()
+	if i == 0 {
+		return ns.unquotedNames[:ns.offsets[0]]
+	} else {
+		return ns.unquotedNames[ns.offsets[i-1]:ns.offsets[i-0]]
+	}
+}
+
+// invalidOffset indicates that the last JSON object currently has no name.
+const invalidOffset = math.MinInt
+
+// push descends into a nested JSON object.
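Aside (illustrative, not part of the vendored file; the variable names are hypothetical): names are concatenated and addressed by their end offsets, so retrieving the second of the names "id" and "name" is a plain slice.

var (
	exampleNames   = []byte("idname") // "id" then "name", back to back
	exampleOffsets = []int{2, 6}      // end offset of each name
	_              = exampleNames[exampleOffsets[0]:exampleOffsets[1]] // "name"
)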
+func (ns *objectNameStack) push() { + ns.offsets = append(ns.offsets, invalidOffset) +} + +// replaceLastQuotedOffset replaces the last name with the starting offset +// to the quoted name in some remote buffer. All offsets provided must be +// relative to the same buffer until copyQuotedBuffer is called. +func (ns *objectNameStack) replaceLastQuotedOffset(i int) { + // Use bit-wise inversion instead of naive multiplication by -1 to avoid + // ambiguity regarding zero (which is a valid offset into the names field). + // Bit-wise inversion is mathematically equivalent to -i-1, + // such that 0 becomes -1, 1 becomes -2, and so forth. + // This ensures that remote offsets are always negative. + ns.offsets[len(ns.offsets)-1] = ^i +} + +// replaceLastUnquotedName replaces the last name with the provided name. +// +// Invariant: Must call copyQuotedBuffer beforehand. +func (ns *objectNameStack) replaceLastUnquotedName(s string) { + ns.ensureCopiedBuffer() + var startOffset int + if len(ns.offsets) > 1 { + startOffset = ns.offsets[len(ns.offsets)-2] + } + ns.unquotedNames = append(ns.unquotedNames[:startOffset], s...) + ns.offsets[len(ns.offsets)-1] = len(ns.unquotedNames) +} + +// clearLast removes any name in the last JSON object. +// It is semantically equivalent to ns.push followed by ns.pop. +func (ns *objectNameStack) clearLast() { + ns.offsets[len(ns.offsets)-1] = invalidOffset +} + +// pop ascends out of a nested JSON object. +func (ns *objectNameStack) pop() { + ns.offsets = ns.offsets[:len(ns.offsets)-1] +} + +// copyQuotedBuffer copies names from the remote buffer into the local names +// buffer so that there are no more offset references into the remote buffer. +// This allows the remote buffer to change contents without affecting +// the names that this data structure is trying to remember. +func (ns *objectNameStack) copyQuotedBuffer(b []byte) { + // Find the first negative offset. + var i int + for i = len(ns.offsets) - 1; i >= 0 && ns.offsets[i] < 0; i-- { + continue + } + + // Copy each name from the remote buffer into the local buffer. + for i = i + 1; i < len(ns.offsets); i++ { + if i == len(ns.offsets)-1 && ns.offsets[i] == invalidOffset { + if i == 0 { + ns.offsets[i] = 0 + } else { + ns.offsets[i] = ns.offsets[i-1] + } + break // last JSON object had a push without any names + } + + // As a form of Hyrum proofing, we write an invalid character into the + // buffer to make misuse of Decoder.ReadToken more obvious. + // We need to undo that mutation here. + quotedName := b[^ns.offsets[i]:] + if quotedName[0] == invalidateBufferByte { + quotedName[0] = '"' + } + + // Append the unquoted name to the local buffer. + var startOffset int + if i > 0 { + startOffset = ns.offsets[i-1] + } + if n := consumeSimpleString(quotedName); n > 0 { + ns.unquotedNames = append(ns.unquotedNames[:startOffset], quotedName[len(`"`):n-len(`"`)]...) + } else { + ns.unquotedNames, _ = unescapeString(ns.unquotedNames[:startOffset], quotedName) + } + ns.offsets[i] = len(ns.unquotedNames) + } +} + +func (ns *objectNameStack) ensureCopiedBuffer() { + if len(ns.offsets) > 0 && ns.offsets[len(ns.offsets)-1] < 0 { + panic("BUG: copyQuotedBuffer not called beforehand") + } +} + +// objectNamespaceStack is a stack of object namespaces. +// This data structure assists in detecting duplicate names. +type objectNamespaceStack []objectNamespace + +// reset resets the object namespace stack. 
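Aside (illustrative, not part of the vendored file): the bit-wise inversion used by replaceLastQuotedOffset above maps an offset i to -i-1, so remote offsets are always negative and a second inversion recovers the original value.

const (
	remoteOffset = ^5            // -6: marks "starts at offset 5 in the remote buffer"
	recovered    = ^remoteOffset // 5 again, applied when copyQuotedBuffer copies the name
)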
+func (nss *objectNamespaceStack) reset() { + if cap(*nss) > 1<<10 { + *nss = nil + } + *nss = (*nss)[:0] +} + +// push starts a new namespace for a nested JSON object. +func (nss *objectNamespaceStack) push() { + if cap(*nss) > len(*nss) { + *nss = (*nss)[:len(*nss)+1] + nss.last().reset() + } else { + *nss = append(*nss, objectNamespace{}) + } +} + +// last returns a pointer to the last JSON object namespace. +func (nss objectNamespaceStack) last() *objectNamespace { + return &nss[len(nss)-1] +} + +// pop terminates the namespace for a nested JSON object. +func (nss *objectNamespaceStack) pop() { + *nss = (*nss)[:len(*nss)-1] +} + +// objectNamespace is the namespace for a JSON object. +// In contrast to objectNameStack, this needs to remember a all names +// per JSON object. +// +// The zero value is an empty namespace ready for use. +type objectNamespace struct { + // It relies on a linear search over all the names before switching + // to use a Go map for direct lookup. + + // endOffsets is a list of offsets to the end of each name in buffers. + // The length of offsets is the number of names in the namespace. + endOffsets []uint + // allUnquotedNames is a back-to-back concatenation of every name in the namespace. + allUnquotedNames []byte + // mapNames is a Go map containing every name in the namespace. + // Only valid if non-nil. + mapNames map[string]struct{} +} + +// reset resets the namespace to be empty. +func (ns *objectNamespace) reset() { + ns.endOffsets = ns.endOffsets[:0] + ns.allUnquotedNames = ns.allUnquotedNames[:0] + ns.mapNames = nil + if cap(ns.endOffsets) > 1<<6 { + ns.endOffsets = nil // avoid pinning arbitrarily large amounts of memory + } + if cap(ns.allUnquotedNames) > 1<<10 { + ns.allUnquotedNames = nil // avoid pinning arbitrarily large amounts of memory + } +} + +// length reports the number of names in the namespace. +func (ns *objectNamespace) length() int { + return len(ns.endOffsets) +} + +// getUnquoted retrieves the ith unquoted name in the namespace. +func (ns *objectNamespace) getUnquoted(i int) []byte { + if i == 0 { + return ns.allUnquotedNames[:ns.endOffsets[0]] + } else { + return ns.allUnquotedNames[ns.endOffsets[i-1]:ns.endOffsets[i-0]] + } +} + +// lastUnquoted retrieves the last name in the namespace. +func (ns *objectNamespace) lastUnquoted() []byte { + return ns.getUnquoted(ns.length() - 1) +} + +// insertQuoted inserts a name and reports whether it was inserted, +// which only occurs if name is not already in the namespace. +// The provided name must be a valid JSON string. +func (ns *objectNamespace) insertQuoted(name []byte, isVerbatim bool) bool { + if isVerbatim { + name = name[len(`"`) : len(name)-len(`"`)] + } + return ns.insert(name, !isVerbatim) +} +func (ns *objectNamespace) insertUnquoted(name []byte) bool { + return ns.insert(name, false) +} +func (ns *objectNamespace) insert(name []byte, quoted bool) bool { + var allNames []byte + if quoted { + allNames, _ = unescapeString(ns.allUnquotedNames, name) + } else { + allNames = append(ns.allUnquotedNames, name...) + } + name = allNames[len(ns.allUnquotedNames):] + + // Switch to a map if the buffer is too large for linear search. + // This does not add the current name to the map. 
+	if ns.mapNames == nil && (ns.length() > 64 || len(ns.allUnquotedNames) > 1024) {
+		ns.mapNames = make(map[string]struct{})
+		var startOffset uint
+		for _, endOffset := range ns.endOffsets {
+			name := ns.allUnquotedNames[startOffset:endOffset]
+			ns.mapNames[string(name)] = struct{}{} // allocates a new string
+			startOffset = endOffset
+		}
+	}
+
+	if ns.mapNames == nil {
+		// Perform linear search over the buffer to find matching names.
+		// It provides O(n) lookup, but does not require any allocations.
+		var startOffset uint
+		for _, endOffset := range ns.endOffsets {
+			if string(ns.allUnquotedNames[startOffset:endOffset]) == string(name) {
+				return false
+			}
+			startOffset = endOffset
+		}
+	} else {
+		// Use the map if it is populated.
+		// It provides O(1) lookup, but requires a string allocation per name.
+		if _, ok := ns.mapNames[string(name)]; ok {
+			return false
+		}
+		ns.mapNames[string(name)] = struct{}{} // allocates a new string
+	}
+
+	ns.allUnquotedNames = allNames
+	ns.endOffsets = append(ns.endOffsets, uint(len(ns.allUnquotedNames)))
+	return true
+}
+
+// removeLast removes the last name in the namespace.
+func (ns *objectNamespace) removeLast() {
+	if ns.mapNames != nil {
+		delete(ns.mapNames, string(ns.lastUnquoted()))
+	}
+	if ns.length()-1 == 0 {
+		ns.endOffsets = ns.endOffsets[:0]
+		ns.allUnquotedNames = ns.allUnquotedNames[:0]
+	} else {
+		ns.endOffsets = ns.endOffsets[:ns.length()-1]
+		ns.allUnquotedNames = ns.allUnquotedNames[:ns.endOffsets[ns.length()-1]]
+	}
+}
+
+type uintSet64 uint64
+
+func (s uintSet64) has(i uint) bool { return s&(1<<i) > 0 }
+func (s *uintSet64) set(i uint)     { *s |= 1 << i }
+
+// uintSet is a set of unsigned integers.
+// It is optimized for most integers being close to zero.
+type uintSet struct {
+	lo uintSet64
+	hi []uintSet64
+}
+
+// has reports whether i is in the set.
+func (s *uintSet) has(i uint) bool {
+	if i < 64 {
+		return s.lo.has(i)
+	} else {
+		i -= 64
+		iHi, iLo := int(i/64), i%64
+		return iHi < len(s.hi) && s.hi[iHi].has(iLo)
+	}
+}
+
+// insert inserts i into the set and reports whether it was the first insertion.
+func (s *uintSet) insert(i uint) bool {
+	// TODO: Make this inlineable at least for the lower 64-bit case.
+	if i < 64 {
+		has := s.lo.has(i)
+		s.lo.set(i)
+		return !has
+	} else {
+		i -= 64
+		iHi, iLo := int(i/64), i%64
+		if iHi >= len(s.hi) {
+			s.hi = append(s.hi, make([]uintSet64, iHi+1-len(s.hi))...)
+			s.hi = s.hi[:cap(s.hi)]
+		}
+		has := s.hi[iHi].has(iLo)
+		s.hi[iHi].set(iLo)
+		return !has
+	}
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go
new file mode 100644
index 000000000..9acba7dad
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go
@@ -0,0 +1,522 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"math"
+	"strconv"
+)
+
+// NOTE: Token is analogous to v1 json.Token.
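Aside (for comparison only, not part of the vendored file; the function name and the stdjson alias for the standard encoding/json package are hypothetical, and the standard strings package is assumed): the v1 token stream that this Token mirrors looks like this.

func exampleV1Tokens() {
	dec := stdjson.NewDecoder(strings.NewReader(`{"a":1}`))
	for {
		tok, err := dec.Token() // json.Delim('{'), "a", float64(1), json.Delim('}')
		if err != nil {
			return // io.EOF once the stream is exhausted
		}
		_ = tok
	}
}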
+ +const ( + maxInt64 = math.MaxInt64 + minInt64 = math.MinInt64 + maxUint64 = math.MaxUint64 + minUint64 = 0 // for consistency and readability purposes + + invalidTokenPanic = "invalid json.Token; it has been voided by a subsequent json.Decoder call" +) + +// Token represents a lexical JSON token, which may be one of the following: +// - a JSON literal (i.e., null, true, or false) +// - a JSON string (e.g., "hello, world!") +// - a JSON number (e.g., 123.456) +// - a start or end delimiter for a JSON object (i.e., { or } ) +// - a start or end delimiter for a JSON array (i.e., [ or ] ) +// +// A Token cannot represent entire array or object values, while a RawValue can. +// There is no Token to represent commas and colons since +// these structural tokens can be inferred from the surrounding context. +type Token struct { + nonComparable + + // Tokens can exist in either a "raw" or an "exact" form. + // Tokens produced by the Decoder are in the "raw" form. + // Tokens returned by constructors are usually in the "exact" form. + // The Encoder accepts Tokens in either the "raw" or "exact" form. + // + // The following chart shows the possible values for each Token type: + // ╔═════════════════╦════════════╤════════════╤════════════╗ + // ║ Token type ║ raw field │ str field │ num field ║ + // ╠═════════════════╬════════════╪════════════╪════════════╣ + // ║ null (raw) ║ "null" │ "" │ 0 ║ + // ║ false (raw) ║ "false" │ "" │ 0 ║ + // ║ true (raw) ║ "true" │ "" │ 0 ║ + // ║ string (raw) ║ non-empty │ "" │ offset ║ + // ║ string (string) ║ nil │ non-empty │ 0 ║ + // ║ number (raw) ║ non-empty │ "" │ offset ║ + // ║ number (float) ║ nil │ "f" │ non-zero ║ + // ║ number (int64) ║ nil │ "i" │ non-zero ║ + // ║ number (uint64) ║ nil │ "u" │ non-zero ║ + // ║ object (delim) ║ "{" or "}" │ "" │ 0 ║ + // ║ array (delim) ║ "[" or "]" │ "" │ 0 ║ + // ╚═════════════════╩════════════╧════════════╧════════════╝ + // + // Notes: + // - For tokens stored in "raw" form, the num field contains the + // absolute offset determined by raw.previousOffsetStart(). + // The buffer itself is stored in raw.previousBuffer(). + // - JSON literals and structural characters are always in the "raw" form. + // - JSON strings and numbers can be in either "raw" or "exact" forms. + // - The exact zero value of JSON strings and numbers in the "exact" forms + // have ambiguous representation. Thus, they are always represented + // in the "raw" form. + + // raw contains a reference to the raw decode buffer. + // If non-nil, then its value takes precedence over str and num. + // It is only valid if num == raw.previousOffsetStart(). + raw *decodeBuffer + + // str is the unescaped JSON string if num is zero. + // Otherwise, it is "f", "i", or "u" if num should be interpreted + // as a float64, int64, or uint64, respectively. + str string + + // num is a float64, int64, or uint64 stored as a uint64 value. + // It is non-zero for any JSON number in the "exact" form. + num uint64 +} + +// TODO: Does representing 1-byte delimiters as *decodeBuffer cause performance issues? 
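Aside (illustrative, not part of the vendored file): reading the chart above against the constructors defined below, the "exact" forms come out as follows.

var (
	_ = String("hi") // str: "hi" — a string in the "exact" form
	_ = Int(-3)      // str: "i", num: uint64(-3)
	_ = Float(1.5)   // str: "f", num: math.Float64bits(1.5)
	_ = String("")   // zeroString: the exact zero form is ambiguous, so the "raw" form is used
)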
+ +var ( + Null Token = rawToken("null") + False Token = rawToken("false") + True Token = rawToken("true") + + ObjectStart Token = rawToken("{") + ObjectEnd Token = rawToken("}") + ArrayStart Token = rawToken("[") + ArrayEnd Token = rawToken("]") + + zeroString Token = rawToken(`""`) + zeroNumber Token = rawToken(`0`) + + nanString Token = String("NaN") + pinfString Token = String("Infinity") + ninfString Token = String("-Infinity") +) + +func rawToken(s string) Token { + return Token{raw: &decodeBuffer{buf: []byte(s), prevStart: 0, prevEnd: len(s)}} +} + +// Bool constructs a Token representing a JSON boolean. +func Bool(b bool) Token { + if b { + return True + } + return False +} + +// String constructs a Token representing a JSON string. +// The provided string should contain valid UTF-8, otherwise invalid characters +// may be mangled as the Unicode replacement character. +func String(s string) Token { + if len(s) == 0 { + return zeroString + } + return Token{str: s} +} + +// Float constructs a Token representing a JSON number. +// The values NaN, +Inf, and -Inf will be represented +// as a JSON string with the values "NaN", "Infinity", and "-Infinity". +func Float(n float64) Token { + switch { + case math.Float64bits(n) == 0: + return zeroNumber + case math.IsNaN(n): + return nanString + case math.IsInf(n, +1): + return pinfString + case math.IsInf(n, -1): + return ninfString + } + return Token{str: "f", num: math.Float64bits(n)} +} + +// Int constructs a Token representing a JSON number from an int64. +func Int(n int64) Token { + if n == 0 { + return zeroNumber + } + return Token{str: "i", num: uint64(n)} +} + +// Uint constructs a Token representing a JSON number from a uint64. +func Uint(n uint64) Token { + if n == 0 { + return zeroNumber + } + return Token{str: "u", num: uint64(n)} +} + +// Clone makes a copy of the Token such that its value remains valid +// even after a subsequent Decoder.Read call. +func (t Token) Clone() Token { + // TODO: Allow caller to avoid any allocations? + if raw := t.raw; raw != nil { + // Avoid copying globals. + if t.raw.prevStart == 0 { + switch t.raw { + case Null.raw: + return Null + case False.raw: + return False + case True.raw: + return True + case ObjectStart.raw: + return ObjectStart + case ObjectEnd.raw: + return ObjectEnd + case ArrayStart.raw: + return ArrayStart + case ArrayEnd.raw: + return ArrayEnd + } + } + + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + // TODO(https://go.dev/issue/45038): Use bytes.Clone. + buf := append([]byte(nil), raw.previousBuffer()...) + return Token{raw: &decodeBuffer{buf: buf, prevStart: 0, prevEnd: len(buf)}} + } + return t +} + +// Bool returns the value for a JSON boolean. +// It panics if the token kind is not a JSON boolean. +func (t Token) Bool() bool { + switch t.raw { + case True.raw: + return true + case False.raw: + return false + default: + panic("invalid JSON token kind: " + t.Kind().String()) + } +} + +// appendString appends a JSON string to dst and returns it. +// It panics if t is not a JSON string. +func (t Token) appendString(dst []byte, validateUTF8, preserveRaw bool, escapeRune func(rune) bool) ([]byte, error) { + if raw := t.raw; raw != nil { + // Handle raw string value. 
+ buf := raw.previousBuffer() + if Kind(buf[0]) == '"' { + if escapeRune == nil && consumeSimpleString(buf) == len(buf) { + return append(dst, buf...), nil + } + dst, _, err := reformatString(dst, buf, validateUTF8, preserveRaw, escapeRune) + return dst, err + } + } else if len(t.str) != 0 && t.num == 0 { + // Handle exact string value. + return appendString(dst, t.str, validateUTF8, escapeRune) + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// String returns the unescaped string value for a JSON string. +// For other JSON kinds, this returns the raw JSON representation. +func (t Token) String() string { + // This is inlinable to take advantage of "function outlining". + // This avoids an allocation for the string(b) conversion + // if the caller does not use the string in an escaping manner. + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ + s, b := t.string() + if len(b) > 0 { + return string(b) + } + return s +} +func (t Token) string() (string, []byte) { + if raw := t.raw; raw != nil { + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + buf := raw.previousBuffer() + if buf[0] == '"' { + // TODO: Preserve valueFlags in Token? + isVerbatim := consumeSimpleString(buf) == len(buf) + return "", unescapeStringMayCopy(buf, isVerbatim) + } + // Handle tokens that are not JSON strings for fmt.Stringer. + return "", buf + } + if len(t.str) != 0 && t.num == 0 { + return t.str, nil + } + // Handle tokens that are not JSON strings for fmt.Stringer. + if t.num > 0 { + switch t.str[0] { + case 'f': + return string(appendNumber(nil, math.Float64frombits(t.num), 64)), nil + case 'i': + return strconv.FormatInt(int64(t.num), 10), nil + case 'u': + return strconv.FormatUint(uint64(t.num), 10), nil + } + } + return "", nil +} + +// appendNumber appends a JSON number to dst and returns it. +// It panics if t is not a JSON number. +func (t Token) appendNumber(dst []byte, canonicalize bool) ([]byte, error) { + if raw := t.raw; raw != nil { + // Handle raw number value. + buf := raw.previousBuffer() + if Kind(buf[0]).normalize() == '0' { + if !canonicalize { + return append(dst, buf...), nil + } + dst, _, err := reformatNumber(dst, buf, canonicalize) + return dst, err + } + } else if t.num != 0 { + // Handle exact number value. + switch t.str[0] { + case 'f': + return appendNumber(dst, math.Float64frombits(t.num), 64), nil + case 'i': + return strconv.AppendInt(dst, int64(t.num), 10), nil + case 'u': + return strconv.AppendUint(dst, uint64(t.num), 10), nil + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Float returns the floating-point value for a JSON number. +// It returns a NaN, +Inf, or -Inf value for any JSON string +// with the values "NaN", "Infinity", or "-Infinity". +// It panics for all other cases. +func (t Token) Float() float64 { + if raw := t.raw; raw != nil { + // Handle raw number value. + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + buf := raw.previousBuffer() + if Kind(buf[0]).normalize() == '0' { + fv, _ := parseFloat(buf, 64) + return fv + } + } else if t.num != 0 { + // Handle exact number value. + switch t.str[0] { + case 'f': + return math.Float64frombits(t.num) + case 'i': + return float64(int64(t.num)) + case 'u': + return float64(uint64(t.num)) + } + } + + // Handle string values with "NaN", "Infinity", or "-Infinity". 
+ if t.Kind() == '"' { + switch t.String() { + case "NaN": + return math.NaN() + case "Infinity": + return math.Inf(+1) + case "-Infinity": + return math.Inf(-1) + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Int returns the signed integer value for a JSON number. +// The fractional component of any number is ignored (truncation toward zero). +// Any number beyond the representation of an int64 will be saturated +// to the closest representable value. +// It panics if the token kind is not a JSON number. +func (t Token) Int() int64 { + if raw := t.raw; raw != nil { + // Handle raw integer value. + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + neg := false + buf := raw.previousBuffer() + if len(buf) > 0 && buf[0] == '-' { + neg, buf = true, buf[1:] + } + if numAbs, ok := parseDecUint(buf); ok { + if neg { + if numAbs > -minInt64 { + return minInt64 + } + return -1 * int64(numAbs) + } else { + if numAbs > +maxInt64 { + return maxInt64 + } + return +1 * int64(numAbs) + } + } + } else if t.num != 0 { + // Handle exact integer value. + switch t.str[0] { + case 'i': + return int64(t.num) + case 'u': + if t.num > maxInt64 { + return maxInt64 + } + return int64(t.num) + } + } + + // Handle JSON number that is a floating-point value. + if t.Kind() == '0' { + switch fv := t.Float(); { + case fv >= maxInt64: + return maxInt64 + case fv <= minInt64: + return minInt64 + default: + return int64(fv) // truncation toward zero + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Uint returns the unsigned integer value for a JSON number. +// The fractional component of any number is ignored (truncation toward zero). +// Any number beyond the representation of an uint64 will be saturated +// to the closest representable value. +// It panics if the token kind is not a JSON number. +func (t Token) Uint() uint64 { + // NOTE: This accessor returns 0 for any negative JSON number, + // which might be surprising, but is at least consistent with the behavior + // of saturating out-of-bounds numbers to the closest representable number. + + if raw := t.raw; raw != nil { + // Handle raw integer value. + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + neg := false + buf := raw.previousBuffer() + if len(buf) > 0 && buf[0] == '-' { + neg, buf = true, buf[1:] + } + if num, ok := parseDecUint(buf); ok { + if neg { + return minUint64 + } + return num + } + } else if t.num != 0 { + // Handle exact integer value. + switch t.str[0] { + case 'u': + return t.num + case 'i': + if int64(t.num) < minUint64 { + return minUint64 + } + return uint64(int64(t.num)) + } + } + + // Handle JSON number that is a floating-point value. + if t.Kind() == '0' { + switch fv := t.Float(); { + case fv >= maxUint64: + return maxUint64 + case fv <= minUint64: + return minUint64 + default: + return uint64(fv) // truncation toward zero + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Kind returns the token kind. 
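Aside (a standalone restatement, not part of the vendored file; the helper name is hypothetical): the float-to-int64 saturation used by Int above, in isolation.

func saturateInt64(f float64) int64 {
	switch {
	case f >= math.MaxInt64:
		return math.MaxInt64 // clamp instead of overflowing
	case f <= math.MinInt64:
		return math.MinInt64
	default:
		return int64(f) // truncation toward zero
	}
}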
+func (t Token) Kind() Kind { + switch { + case t.raw != nil: + raw := t.raw + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + return Kind(t.raw.buf[raw.prevStart]).normalize() + case t.num != 0: + return '0' + case len(t.str) != 0: + return '"' + default: + return invalidKind + } +} + +// Kind represents each possible JSON token kind with a single byte, +// which is conveniently the first byte of that kind's grammar +// with the restriction that numbers always be represented with '0': +// +// - 'n': null +// - 'f': false +// - 't': true +// - '"': string +// - '0': number +// - '{': object start +// - '}': object end +// - '[': array start +// - ']': array end +// +// An invalid kind is usually represented using 0, +// but may be non-zero due to invalid JSON data. +type Kind byte + +const invalidKind Kind = 0 + +// String prints the kind in a humanly readable fashion. +func (k Kind) String() string { + switch k { + case 'n': + return "null" + case 'f': + return "false" + case 't': + return "true" + case '"': + return "string" + case '0': + return "number" + case '{': + return "{" + case '}': + return "}" + case '[': + return "[" + case ']': + return "]" + default: + return "" + } +} + +// normalize coalesces all possible starting characters of a number as just '0'. +func (k Kind) normalize() Kind { + if k == '-' || ('0' <= k && k <= '9') { + return '0' + } + return k +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go new file mode 100644 index 000000000..e0bd1b31d --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go @@ -0,0 +1,381 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "io" + "sort" + "sync" + "unicode/utf16" + "unicode/utf8" +) + +// NOTE: RawValue is analogous to v1 json.RawMessage. + +// RawValue represents a single raw JSON value, which may be one of the following: +// - a JSON literal (i.e., null, true, or false) +// - a JSON string (e.g., "hello, world!") +// - a JSON number (e.g., 123.456) +// - an entire JSON object (e.g., {"fizz":"buzz"} ) +// - an entire JSON array (e.g., [1,2,3] ) +// +// RawValue can represent entire array or object values, while Token cannot. +// RawValue may contain leading and/or trailing whitespace. +type RawValue []byte + +// Clone returns a copy of v. +func (v RawValue) Clone() RawValue { + if v == nil { + return nil + } + return append(RawValue{}, v...) +} + +// String returns the string formatting of v. +func (v RawValue) String() string { + if v == nil { + return "null" + } + return string(v) +} + +// IsValid reports whether the raw JSON value is syntactically valid +// according to RFC 7493. +// +// It verifies whether the input is properly encoded as UTF-8, +// that escape sequences within strings decode to valid Unicode codepoints, and +// that all names in each object are unique. +// It does not verify whether numbers are representable within the limits +// of any common numeric type (e.g., float64, int64, or uint64). 
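Aside (illustrative, not part of the vendored file): per the documentation above, RFC 7493 validity is stricter than a bare syntax check; duplicate object names in particular are rejected.

var (
	_ = RawValue(`{"a":1}`).IsValid()       // true: well-formed, unique names, valid UTF-8
	_ = RawValue(`{"a":1,"a":2}`).IsValid() // false: duplicate member name
	_ = RawValue(`[1,2,]`).IsValid()        // false: trailing comma is not valid JSON
)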
+func (v RawValue) IsValid() bool { + d := getBufferedDecoder(v, DecodeOptions{}) + defer putBufferedDecoder(d) + _, errVal := d.ReadValue() + _, errEOF := d.ReadToken() + return errVal == nil && errEOF == io.EOF +} + +// Compact removes all whitespace from the raw JSON value. +// +// It does not reformat JSON strings to use any other representation. +// It is guaranteed to succeed if the input is valid. +// If the value is already compacted, then the buffer is not mutated. +func (v *RawValue) Compact() error { + return v.reformat(false, false, "", "") +} + +// Indent reformats the whitespace in the raw JSON value so that each element +// in a JSON object or array begins on a new, indented line beginning with +// prefix followed by one or more copies of indent according to the nesting. +// The value does not begin with the prefix nor any indention, +// to make it easier to embed inside other formatted JSON data. +// +// It does not reformat JSON strings to use any other representation. +// It is guaranteed to succeed if the input is valid. +// If the value is already indented properly, then the buffer is not mutated. +func (v *RawValue) Indent(prefix, indent string) error { + return v.reformat(false, true, prefix, indent) +} + +// Canonicalize canonicalizes the raw JSON value according to the +// JSON Canonicalization Scheme (JCS) as defined by RFC 8785 +// where it produces a stable representation of a JSON value. +// +// The output stability is dependent on the stability of the application data +// (see RFC 8785, Appendix E). It cannot produce stable output from +// fundamentally unstable input. For example, if the JSON value +// contains ephemeral data (e.g., a frequently changing timestamp), +// then the value is still unstable regardless of whether this is called. +// +// Note that JCS treats all JSON numbers as IEEE 754 double precision numbers. +// Any numbers with precision beyond what is representable by that form +// will lose their precision when canonicalized. For example, integer values +// beyond ±2⁵³ will lose their precision. It is recommended that +// int64 and uint64 data types be represented as a JSON string. +// +// It is guaranteed to succeed if the input is valid. +// If the value is already canonicalized, then the buffer is not mutated. +func (v *RawValue) Canonicalize() error { + return v.reformat(true, false, "", "") +} + +// TODO: Instead of implementing the v1 Marshaler/Unmarshaler, +// consider implementing the v2 versions instead. + +// MarshalJSON returns v as the JSON encoding of v. +// It returns the stored value as the raw JSON output without any validation. +// If v is nil, then this returns a JSON null. +func (v RawValue) MarshalJSON() ([]byte, error) { + // NOTE: This matches the behavior of v1 json.RawMessage.MarshalJSON. + if v == nil { + return []byte("null"), nil + } + return v, nil +} + +// UnmarshalJSON sets v as the JSON encoding of b. +// It stores a copy of the provided raw JSON input without any validation. +func (v *RawValue) UnmarshalJSON(b []byte) error { + // NOTE: This matches the behavior of v1 json.RawMessage.UnmarshalJSON. + if v == nil { + return errors.New("json.RawValue: UnmarshalJSON on nil pointer") + } + *v = append((*v)[:0], b...) + return nil +} + +// Kind returns the starting token kind. +// For a valid value, this will never include '}' or ']'. 
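Aside (a rough sketch of the effect, not part of the vendored file; the function name is hypothetical): canonicalization compacts whitespace, sorts members by UTF-16 code points, and prints numbers in their shortest double-precision form.

func exampleCanonicalize() {
	v := RawValue(`{"b": 2, "a": 1.50}`)
	_ = v.Canonicalize() // v is now `{"a":1.5,"b":2}` for this valid input
}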
+func (v RawValue) Kind() Kind { + if v := v[consumeWhitespace(v):]; len(v) > 0 { + return Kind(v[0]).normalize() + } + return invalidKind +} + +func (v *RawValue) reformat(canonical, multiline bool, prefix, indent string) error { + var eo EncodeOptions + if canonical { + eo.AllowInvalidUTF8 = false // per RFC 8785, section 3.2.4 + eo.AllowDuplicateNames = false // per RFC 8785, section 3.1 + eo.canonicalizeNumbers = true // per RFC 8785, section 3.2.2.3 + eo.EscapeRune = nil // per RFC 8785, section 3.2.2.2 + eo.multiline = false // per RFC 8785, section 3.2.1 + } else { + if s := trimLeftSpaceTab(prefix); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent prefix") + } + if s := trimLeftSpaceTab(indent); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent") + } + eo.AllowInvalidUTF8 = true + eo.AllowDuplicateNames = true + eo.preserveRawStrings = true + eo.multiline = multiline // in case indent is empty + eo.IndentPrefix = prefix + eo.Indent = indent + } + eo.omitTopLevelNewline = true + + // Write the entire value to reformat all tokens and whitespace. + e := getBufferedEncoder(eo) + defer putBufferedEncoder(e) + if err := e.WriteValue(*v); err != nil { + return err + } + + // For canonical output, we may need to reorder object members. + if canonical { + // Obtain a buffered encoder just to use its internal buffer as + // a scratch buffer in reorderObjects for reordering object members. + e2 := getBufferedEncoder(EncodeOptions{}) + defer putBufferedEncoder(e2) + + // Disable redundant checks performed earlier during encoding. + d := getBufferedDecoder(e.buf, DecodeOptions{AllowInvalidUTF8: true, AllowDuplicateNames: true}) + defer putBufferedDecoder(d) + reorderObjects(d, &e2.buf) // per RFC 8785, section 3.2.3 + } + + // Store the result back into the value if different. + if !bytes.Equal(*v, e.buf) { + *v = append((*v)[:0], e.buf...) + } + return nil +} + +func trimLeftSpaceTab(s string) string { + for i, r := range s { + switch r { + case ' ', '\t': + default: + return s[i:] + } + } + return "" +} + +type memberName struct { + // name is the unescaped name. + name []byte + // before and after are byte offsets into Decoder.buf that represents + // the entire name/value pair. It may contain leading commas. + before, after int64 +} + +var memberNamePool = sync.Pool{New: func() any { return new(memberNames) }} + +func getMemberNames() *memberNames { + ns := memberNamePool.Get().(*memberNames) + *ns = (*ns)[:0] + return ns +} +func putMemberNames(ns *memberNames) { + if cap(*ns) < 1<<10 { + for i := range *ns { + (*ns)[i] = memberName{} // avoid pinning name + } + memberNamePool.Put(ns) + } +} + +type memberNames []memberName + +func (m *memberNames) Len() int { return len(*m) } +func (m *memberNames) Less(i, j int) bool { return lessUTF16((*m)[i].name, (*m)[j].name) } +func (m *memberNames) Swap(i, j int) { (*m)[i], (*m)[j] = (*m)[j], (*m)[i] } + +// reorderObjects recursively reorders all object members in place +// according to the ordering specified in RFC 8785, section 3.2.3. +// +// Pre-conditions: +// - The value is valid (i.e., no decoder errors should ever occur). +// - The value is compact (i.e., no whitespace is present). +// - Initial call is provided a Decoder reading from the start of v. +// +// Post-conditions: +// - Exactly one JSON value is read from the Decoder. +// - All fully-parsed JSON objects are reordered by directly moving +// the members in the value buffer. 
+// +// The runtime is approximately O(n·log(n)) + O(m·log(m)), +// where n is len(v) and m is the total number of object members. +func reorderObjects(d *Decoder, scratch *[]byte) { + switch tok, _ := d.ReadToken(); tok.Kind() { + case '{': + // Iterate and collect the name and offsets for every object member. + members := getMemberNames() + defer putMemberNames(members) + var prevName []byte + isSorted := true + + beforeBody := d.InputOffset() // offset after '{' + for d.PeekKind() != '}' { + beforeName := d.InputOffset() + var flags valueFlags + name, _ := d.readValue(&flags) + name = unescapeStringMayCopy(name, flags.isVerbatim()) + reorderObjects(d, scratch) + afterValue := d.InputOffset() + + if isSorted && len(*members) > 0 { + isSorted = lessUTF16(prevName, []byte(name)) + } + *members = append(*members, memberName{name, beforeName, afterValue}) + prevName = name + } + afterBody := d.InputOffset() // offset before '}' + d.ReadToken() + + // Sort the members; return early if it's already sorted. + if isSorted { + return + } + // TODO(https://go.dev/issue/47619): Use slices.Sort. + sort.Sort(members) + + // Append the reordered members to a new buffer, + // then copy the reordered members back over the original members. + // Avoid swapping in place since each member may be a different size + // where moving a member over a smaller member may corrupt the data + // for subsequent members before they have been moved. + // + // The following invariant must hold: + // sum([m.after-m.before for m in members]) == afterBody-beforeBody + sorted := (*scratch)[:0] + for i, member := range *members { + if d.buf[member.before] == ',' { + member.before++ // trim leading comma + } + sorted = append(sorted, d.buf[member.before:member.after]...) + if i < len(*members)-1 { + sorted = append(sorted, ',') // append trailing comma + } + } + if int(afterBody-beforeBody) != len(sorted) { + panic("BUG: length invariant violated") + } + copy(d.buf[beforeBody:afterBody], sorted) + + // Update scratch buffer to the largest amount ever used. + if len(sorted) > len(*scratch) { + *scratch = sorted + } + case '[': + for d.PeekKind() != ']' { + reorderObjects(d, scratch) + } + d.ReadToken() + } +} + +// lessUTF16 reports whether x is lexicographically less than y according +// to the UTF-16 codepoints of the UTF-8 encoded input strings. +// This implements the ordering specified in RFC 8785, section 3.2.3. +// The inputs must be valid UTF-8, otherwise this may panic. +func lessUTF16[Bytes []byte | string](x, y Bytes) bool { + // NOTE: This is an optimized, allocation-free implementation + // of lessUTF16Simple in fuzz_test.go. FuzzLessUTF16 verifies that the + // two implementations agree on the result of comparing any two strings. + + isUTF16Self := func(r rune) bool { + return ('\u0000' <= r && r <= '\uD7FF') || ('\uE000' <= r && r <= '\uFFFF') + } + + var invalidUTF8 bool + x0, y0 := x, y + for { + if len(x) == 0 || len(y) == 0 { + if len(x) == len(y) && invalidUTF8 { + return string(x0) < string(y0) + } + return len(x) < len(y) + } + + // ASCII fast-path. + if x[0] < utf8.RuneSelf || y[0] < utf8.RuneSelf { + if x[0] != y[0] { + return x[0] < y[0] + } + x, y = x[1:], y[1:] + continue + } + + // Decode next pair of runes as UTF-8. + // TODO(https://go.dev/issue/56948): Use a generic implementation + // of utf8.DecodeRune, or rely on a compiler optimization to statically + // hide the cost of a type switch (https://go.dev/issue/57072). 
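For orientation, a self-contained sketch (not vendored code) of the straightforward comparison that this optimized loop preserves: encode both strings to UTF-16 code units and compare them lexicographically, per RFC 8785, section 3.2.3. It omits the invalid-UTF-8 tie-breaking that lessUTF16 additionally handles.

package main

import (
	"fmt"
	"unicode/utf16"
)

// lessUTF16Naive compares two valid UTF-8 strings by their UTF-16 code units.
func lessUTF16Naive(x, y string) bool {
	ux := utf16.Encode([]rune(x))
	uy := utf16.Encode([]rune(y))
	for i := 0; i < len(ux) && i < len(uy); i++ {
		if ux[i] != uy[i] {
			return ux[i] < uy[i]
		}
	}
	return len(ux) < len(uy)
}

func main() {
	// "\U0001F600" (😀) encodes to the surrogate pair D83D DE00, while
	// "\uFB00" (ﬀ) is the single unit FB00, so the emoji sorts first here,
	// whereas plain byte-wise comparison (F0 9F 98 80 vs EF AC 80) would
	// order them the other way around.
	fmt.Println(lessUTF16Naive("\U0001F600", "\uFB00")) // true
}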
+ var rx, ry rune + var nx, ny int + switch any(x).(type) { + case string: + rx, nx = utf8.DecodeRuneInString(string(x)) + ry, ny = utf8.DecodeRuneInString(string(y)) + case []byte: + rx, nx = utf8.DecodeRune([]byte(x)) + ry, ny = utf8.DecodeRune([]byte(y)) + } + + selfx := isUTF16Self(rx) + selfy := isUTF16Self(ry) + switch { + // The x rune is a single UTF-16 codepoint, while + // the y rune is a surrogate pair of UTF-16 codepoints. + case selfx && !selfy: + ry, _ = utf16.EncodeRune(ry) + // The y rune is a single UTF-16 codepoint, while + // the x rune is a surrogate pair of UTF-16 codepoints. + case selfy && !selfx: + rx, _ = utf16.EncodeRune(rx) + } + if rx != ry { + return rx < ry + } + invalidUTF8 = invalidUTF8 || (rx == utf8.RuneError && nx == 1) || (ry == utf8.RuneError && ny == 1) + x, y = x[nx:], y[ny:] + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go new file mode 100644 index 000000000..61141a500 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go @@ -0,0 +1,260 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema. +// - models: a map from definition name to OpenAPI V3 structural schema for each definition. +// Key in map is used to resolve references in the schema. +// - preserveUnknownFields: flag indicating whether unknown fields in all schemas should be preserved. +// - returns: nil and an error if there is a parse error, or if schema does not satisfy a +// required structural schema invariant for conversion. If no error, returns +// a new smd schema. +// +// Schema should be validated as structural before using with this function, or +// there may be information lost. +func ToSchemaFromOpenAPI(models map[string]*spec.Schema, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + + for name, spec := range models { + // Skip/Ignore top-level references + if len(spec.Ref.String()) > 0 { + continue + } + + var a schema.Atom + + // Hard-coded schemas for now as proto_models implementation functions. 
+ // https://github.com/kubernetes/kube-openapi/issues/364 + if name == quantityResource { + a = schema.Atom{ + Scalar: untypedDef.Atom.Scalar, + } + } else if name == rawExtensionResource { + a = untypedDef.Atom + } else { + c2 := c.push(name, &a) + c2.visitSpec(spec) + c.pop(c2) + } + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) visitSpec(m *spec.Schema) { + // Check if this schema opts its descendants into preserve-unknown-fields + if p, ok := m.Extensions["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + c.preserveUnknownFields = true + } + a := c.top() + *a = c.parseSchema(m) +} + +func (c *convert) parseSchema(m *spec.Schema) schema.Atom { + // k8s-generated OpenAPI specs have historically used only one value for + // type and starting with OpenAPIV3 it is only allowed to be + // a single string. + typ := "" + if len(m.Type) > 0 { + typ = m.Type[0] + } + + // Structural Schemas produced by kubernetes follow very specific rules which + // we can use to infer the SMD type: + switch typ { + case "": + // According to Swagger docs: + // https://swagger.io/docs/specification/data-models/data-types/#any + // + // If no type is specified, it is equivalent to accepting any type. + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: c.parseList(m), + Map: c.parseObject(m), + } + + case "object": + return schema.Atom{ + Map: c.parseObject(m), + } + case "array": + return schema.Atom{ + List: c.parseList(m), + } + case "integer", "boolean", "number", "string": + return convertPrimitive(typ, m.Format) + default: + c.reportError("unrecognized type: '%v'", typ) + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + } + } +} + +func (c *convert) makeOpenAPIRef(specSchema *spec.Schema) schema.TypeRef { + refString := specSchema.Ref.String() + + // Special-case handling for $ref stored inside a single-element allOf + if len(refString) == 0 && len(specSchema.AllOf) == 1 && len(specSchema.AllOf[0].Ref.String()) > 0 { + refString = specSchema.AllOf[0].Ref.String() + } + + if _, n := path.Split(refString); len(n) > 0 { + //!TODO: Refactor the field ElementRelationship override + // we can generate the types with overrides ahead of time rather than + // requiring the hacky runtime support + // (could just create a normalized key struct containing all customizations + // to deduplicate) + mapRelationship, err := getMapElementRelationship(specSchema.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + if len(mapRelationship) > 0 { + return schema.TypeRef{ + NamedType: &n, + ElementRelationship: &mapRelationship, + } + } + + return schema.TypeRef{ + NamedType: &n, + } + + } + var inlined schema.Atom + + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &inlined) + c2.preserveUnknownFields = c.preserveUnknownFields + c2.visitSpec(specSchema) + c.pop(c2) + + return schema.TypeRef{ + Inlined: inlined, + } +} + +func (c *convert) parseObject(s *spec.Schema) *schema.Map { + var fields []schema.StructField + for name, member := range s.Properties { + fields = append(fields, schema.StructField{ + Name: name, + Type: c.makeOpenAPIRef(&member), + Default: member.Default, + }) + } + + // AdditionalProperties informs the schema of any "unknown" keys + // Unknown keys are enforced by the ElementType field. 
+ elementType := func() schema.TypeRef { + if s.AdditionalProperties == nil { + // According to openAPI spec, an object without properties and without + // additionalProperties is assumed to be a free-form object. + if c.preserveUnknownFields || len(s.Properties) == 0 { + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + // If properties are specified, do not implicitly allow unknown + // fields + return schema.TypeRef{} + } else if s.AdditionalProperties.Schema != nil { + // Unknown fields use the referred schema + return c.makeOpenAPIRef(s.AdditionalProperties.Schema) + + } else if s.AdditionalProperties.Allows { + // A boolean instead of a schema was provided. Deduce the + // type from the value provided at runtime. + return schema.TypeRef{ + NamedType: &deducedName, + } + } else { + // Additional Properties are explicitly disallowed by the user. + // Ensure element type is empty. + return schema.TypeRef{} + } + }() + + relationship, err := getMapElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + return &schema.Map{ + Fields: fields, + ElementRelationship: relationship, + ElementType: elementType, + } +} + +func (c *convert) parseList(s *spec.Schema) *schema.List { + relationship, mapKeys, err := getListElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + elementType := func() schema.TypeRef { + if s.Items != nil { + if s.Items.Schema == nil || s.Items.Len() != 1 { + c.reportError("structural schema arrays must have exactly one member subtype") + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + subSchema := s.Items.Schema + if subSchema == nil { + subSchema = &s.Items.Schemas[0] + } + return c.makeOpenAPIRef(subSchema) + } else if len(s.Type) > 0 && len(s.Type[0]) > 0 { + c.reportError("`items` must be specified on arrays") + } + + // A list with no items specified is treated as "untyped". + return schema.TypeRef{ + NamedType: &untypedName, + } + + }() + + return &schema.List{ + ElementRelationship: relationship, + Keys: mapKeys, + ElementType: elementType, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go new file mode 100644 index 000000000..2c6fd76a9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. 
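A hypothetical caller's sketch of driving ToSchemaFromOpenAPI from the file above; the definition name io.example.v1.Widget and its single field are invented for illustration:

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/schemaconv"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	models := map[string]*spec.Schema{
		"io.example.v1.Widget": {
			SchemaProps: spec.SchemaProps{
				Type: spec.StringOrArray{"object"},
				Properties: map[string]spec.Schema{
					"name": {SchemaProps: spec.SchemaProps{Type: spec.StringOrArray{"string"}}},
				},
			},
		},
	}
	s, err := schemaconv.ToSchemaFromOpenAPI(models, false)
	if err != nil {
		panic(err)
	}
	// The resulting schema holds the Widget type plus the common
	// untyped/deduced helper types added by addCommonTypes.
	fmt.Println(len(s.Types))
}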
+func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + for _, name := range models.ListModels() { + model := models.LookupModel(name) + + var a schema.Atom + c2 := c.push(name, &a) + model.Accept(c2) + c.pop(c2) + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + + mapRelationship, err := getMapElementRelationship(model.GetExtensions()) + + if err != nil { + c.reportError(err.Error()) + } + + // empty string means unset. + if len(mapRelationship) > 0 { + tr.ElementRelationship = &mapRelationship + } + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? + tr.NamedType = &untypedName + } + } + return tr +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. 
+ a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + a.Map.ElementRelationship, err = getMapElementRelationship(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } +} + +func (c *convert) VisitArray(a *proto.Array) { + relationship, mapKeys, err := getListElementRelationship(a.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + atom := c.top() + atom.List = &schema.List{ + ElementType: c.makeRef(a.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + Keys: mapKeys, + } +} + +func (c *convert) VisitMap(m *proto.Map) { + relationship, err := getMapElementRelationship(m.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + a := c.top() + a.Map = &schema.Map{ + ElementType: c.makeRef(m.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + } +} + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + if c.currentName == quantityResource { + a.Scalar = ptr(schema.Scalar("untyped")) + } else { + *a = convertPrimitive(p.Type, p.Format) + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = deducedDef.Atom +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 35241fde6..9887d185b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -17,43 +17,18 @@ limitations under the License. package schemaconv import ( - "errors" "fmt" - "path" "sort" - "strings" - "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/v4/schema" ) const ( - quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + rawExtensionResource = "io.k8s.apimachinery.pkg.runtime.RawExtension" ) -// ToSchema converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2). -func ToSchema(models proto.Models) (*schema.Schema, error) { - return ToSchemaWithPreserveUnknownFields(models, false) -} - -// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. -func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { - c := convert{ - input: models, - preserveUnknownFields: preserveUnknownFields, - output: &schema.Schema{}, - } - if err := c.convertAll(); err != nil { - return nil, err - } - c.addCommonTypes() - return c.output, nil -} - type convert struct { - input proto.Models preserveUnknownFields bool output *schema.Schema @@ -64,7 +39,6 @@ type convert struct { func (c *convert) push(name string, a *schema.Atom) *convert { return &convert{ - input: c.input, preserveUnknownFields: c.preserveUnknownFields, output: c.output, currentName: name, @@ -78,30 +52,17 @@ func (c *convert) pop(c2 *convert) { c.errorMessages = append(c.errorMessages, c2.errorMessages...) 
} -func (c *convert) convertAll() error { - for _, name := range c.input.ListModels() { - model := c.input.LookupModel(name) - c.insertTypeDef(name, model) - } - if len(c.errorMessages) > 0 { - return errors.New(strings.Join(c.errorMessages, "\n")) - } - return nil -} - func (c *convert) reportError(format string, args ...interface{}) { c.errorMessages = append(c.errorMessages, c.currentName+": "+fmt.Sprintf(format, args...), ) } -func (c *convert) insertTypeDef(name string, model proto.Schema) { +func (c *convert) insertTypeDef(name string, atom schema.Atom) { def := schema.TypeDef{ Name: name, + Atom: atom, } - c2 := c.push(name, &def.Atom) - model.Accept(c2) - c.pop(c2) if def.Atom == (schema.Atom{}) { // This could happen if there were a top-level reference. return @@ -156,32 +117,6 @@ var deducedDef schema.TypeDef = schema.TypeDef{ }, } -func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { - var tr schema.TypeRef - if r, ok := model.(*proto.Ref); ok { - if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { - return schema.TypeRef{ - NamedType: &untypedName, - } - } - // reference a named type - _, n := path.Split(r.Reference()) - tr.NamedType = &n - } else { - // compute the type inline - c2 := c.push("inlined in "+c.currentName, &tr.Inlined) - c2.preserveUnknownFields = preserveUnknownFields - model.Accept(c2) - c.pop(c2) - - if tr == (schema.TypeRef{}) { - // emit warning? - tr.NamedType = &untypedName - } - } - return tr -} - func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { schemaUnions := []schema.Union{} if iunions, ok := extensions["x-kubernetes-unions"]; ok { @@ -279,58 +214,9 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { } } - if union.Discriminator != nil && len(union.Fields) == 0 { - return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) - } return union, nil } -func (c *convert) VisitKind(k *proto.Kind) { - preserveUnknownFields := c.preserveUnknownFields - if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { - preserveUnknownFields = true - } - - a := c.top() - a.Map = &schema.Map{} - for _, name := range k.FieldOrder { - member := k.Fields[name] - tr := c.makeRef(member, preserveUnknownFields) - a.Map.Fields = append(a.Map.Fields, schema.StructField{ - Name: name, - Type: tr, - Default: member.GetDefault(), - }) - } - - unions, err := makeUnions(k.GetExtensions()) - if err != nil { - c.reportError(err.Error()) - return - } - // TODO: We should check that the fields and discriminator - // specified in the union are actual fields in the struct. 
- a.Map.Unions = unions - - if preserveUnknownFields { - a.Map.ElementType = schema.TypeRef{ - NamedType: &deducedName, - } - } - - ext := k.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { - switch val { - case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable - default: - c.reportError("unknown map type %v", val) - } - } -} - func toStringSlice(o interface{}) (out []string, ok bool) { switch t := o.(type) { case []interface{}: @@ -341,117 +227,108 @@ func toStringSlice(o interface{}) (out []string, ok bool) { } } return out, true + case []string: + return t, true } return nil, false } -func (c *convert) VisitArray(a *proto.Array) { - atom := c.top() - atom.List = &schema.List{ - ElementRelationship: schema.Atomic, - } - l := atom.List - l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) - - ext := a.GetExtensions() +func ptr(s schema.Scalar) *schema.Scalar { return &s } - if val, ok := ext["x-kubernetes-list-type"]; ok { - if val == "atomic" { - l.ElementRelationship = schema.Atomic - } else if val == "set" { - l.ElementRelationship = schema.Associative - } else if val == "map" { - l.ElementRelationship = schema.Associative - if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { - if keyNames, ok := toStringSlice(keys); ok { - l.Keys = keyNames - } else { - c.reportError("uninterpreted map keys: %#v", keys) - } - } else { - c.reportError("missing map keys") - } - } else { - c.reportError("unknown list type %v", val) - l.ElementRelationship = schema.Atomic - } - } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { - if val == "merge" || val == "merge,retainKeys" { - l.ElementRelationship = schema.Associative - if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { - if keyName, ok := key.(string); ok { - l.Keys = []string{keyName} - } else { - c.reportError("uninterpreted merge key: %#v", key) - } - } else { - // It's not an error for this to be absent, it - // means it's a set. - } - } else if val == "retainKeys" { - } else { - c.reportError("unknown patch strategy %v", val) - l.ElementRelationship = schema.Atomic +// Basic conversion functions to convert OpenAPI schema definitions to +// SMD Schema atoms +func convertPrimitive(typ string, format string) (a schema.Atom) { + switch typ { + case "integer": + a.Scalar = ptr(schema.Numeric) + case "number": + a.Scalar = ptr(schema.Numeric) + case "string": + switch format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. 
+ a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } + case "boolean": + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } -} -func (c *convert) VisitMap(m *proto.Map) { - a := c.top() - a.Map = &schema.Map{} - a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + return a +} - ext := m.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { +func getListElementRelationship(ext map[string]any) (schema.ElementRelationship, []string, error) { + if val, ok := ext["x-kubernetes-list-type"]; ok { switch val { case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable + return schema.Atomic, nil, nil + case "set": + return schema.Associative, nil, nil + case "map": + keys, ok := ext["x-kubernetes-list-map-keys"] + + if !ok { + return schema.Associative, nil, fmt.Errorf("missing map keys") + } + + keyNames, ok := toStringSlice(keys) + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted map keys: %#v", keys) + } + + return schema.Associative, keyNames, nil default: - c.reportError("unknown map type %v", val) + return schema.Atomic, nil, fmt.Errorf("unknown list type %v", val) } - } -} + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + switch val { + case "merge", "merge,retainKeys": + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + keyName, ok := key.(string) -func ptr(s schema.Scalar) *schema.Scalar { return &s } + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted merge key: %#v", key) + } -func (c *convert) VisitPrimitive(p *proto.Primitive) { - a := c.top() - if c.currentName == quantityResource { - a.Scalar = ptr(schema.Scalar("untyped")) - } else { - switch p.Type { - case proto.Integer: - a.Scalar = ptr(schema.Numeric) - case proto.Number: - a.Scalar = ptr(schema.Numeric) - case proto.String: - switch p.Format { - case "": - a.Scalar = ptr(schema.String) - case "byte": - // byte really means []byte and is encoded as a string. 
- a.Scalar = ptr(schema.String) - case "int-or-string": - a.Scalar = ptr(schema.Scalar("untyped")) - case "date-time": - a.Scalar = ptr(schema.Scalar("untyped")) - default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Associative, []string{keyName}, nil } - case proto.Boolean: - a.Scalar = ptr(schema.Boolean) + // It's not an error for x-kubernetes-patch-merge-key to be absent, + // it means it's a set + return schema.Associative, nil, nil + case "retainKeys": + return schema.Atomic, nil, nil default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Atomic, nil, fmt.Errorf("unknown patch strategy %v", val) } } -} -func (c *convert) VisitArbitrary(a *proto.Arbitrary) { - *c.top() = deducedDef.Atom + // Treat as atomic by default + return schema.Atomic, nil, nil } -func (c *convert) VisitReference(proto.Reference) { - // Do nothing, we handle references specially +// Returns map element relationship if specified, or empty string if unspecified +func getMapElementRelationship(ext map[string]any) (schema.ElementRelationship, error) { + val, ok := ext["x-kubernetes-map-type"] + if !ok { + // unset Map element relationship + return "", nil + } + + switch val { + case "atomic": + return schema.Atomic, nil + case "granular": + return schema.Separable, nil + default: + return "", fmt.Errorf("unknown map type %v", val) + } } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/component.go b/vendor/k8s.io/kube-openapi/pkg/spec3/component.go new file mode 100644 index 000000000..c1bb8bc7b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/component.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import "k8s.io/kube-openapi/pkg/validation/spec" + +// Components holds a set of reusable objects for different aspects of the OAS. +// All objects defined within the components object will have no effect on the API +// unless they are explicitly referenced from properties outside the components object. 
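An illustrative sketch (the Widget name is made up) of populating the Components struct defined just below and serializing it; the omitempty tags keep unused sections out of the output:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/spec3"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	c := spec3.Components{
		Schemas: map[string]*spec.Schema{
			"Widget": {SchemaProps: spec.SchemaProps{Type: spec.StringOrArray{"object"}}},
		},
	}
	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	// Prints something like {"schemas":{"Widget":{"type":"object"}}}.
	fmt.Println(string(out))
}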
+// +// more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#componentsObject +type Components struct { + // Schemas holds reusable Schema Objects + Schemas map[string]*spec.Schema `json:"schemas,omitempty"` + // SecuritySchemes holds reusable Security Scheme Objects, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject + SecuritySchemes SecuritySchemes `json:"securitySchemes,omitempty"` + // Responses holds reusable Responses Objects + Responses map[string]*Response `json:"responses,omitempty"` + // Parameters holds reusable Parameters Objects + Parameters map[string]*Parameter `json:"parameters,omitempty"` + // Example holds reusable Example objects + Examples map[string]*Example `json:"examples,omitempty"` + // RequestBodies holds reusable Request Body objects + RequestBodies map[string]*RequestBody `json:"requestBodies,omitempty"` + // Links is a map of operations links that can be followed from the response + Links map[string]*Link `json:"links,omitempty"` + // Headers holds a maps of a headers name to its definition + Headers map[string]*Header `json:"headers,omitempty"` + // all fields are defined at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#componentsObject +} + +// SecuritySchemes holds reusable Security Scheme Objects, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject +type SecuritySchemes map[string]*SecurityScheme diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go new file mode 100644 index 000000000..1f62c6e77 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -0,0 +1,105 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type Encoding struct { + EncodingProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON +func (e *Encoding) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } + b1, err := json.Marshal(e.EncodingProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(e.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (e *Encoding) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + EncodingProps encodingPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.EncodingProps = encodingPropsOmitZero(e.EncodingProps) + return opts.MarshalNext(enc, x) +} + +func (e *Encoding) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } + if err := json.Unmarshal(data, &e.EncodingProps); err != nil { + return err + } + if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { + return err + } + return nil +} + +func (e *Encoding) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + EncodingProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.EncodingProps = x.EncodingProps + return nil +} + +type EncodingProps struct { + // Content Type for encoding a specific property + ContentType string `json:"contentType,omitempty"` + // A map allowing additional information to be provided as headers + Headers map[string]*Header `json:"headers,omitempty"` + // Describes how a specific property value will be serialized depending on its type + Style string `json:"style,omitempty"` + // When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. For other types of properties this property has no effect + Explode bool `json:"explode,omitempty"` + // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 + AllowReserved bool `json:"allowReserved,omitempty"` +} + +type encodingPropsOmitZero struct { + ContentType string `json:"contentType,omitempty"` + Headers map[string]*Header `json:"headers,omitempty"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go new file mode 100644 index 000000000..8834a92e6 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -0,0 +1,110 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
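A sketch of the round-trip behavior shared by these spec3 types: both the typed props and any x- vendor extensions survive Unmarshal followed by Marshal. The x-acme-note key is invented for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/spec3"
)

func main() {
	in := []byte(`{"contentType":"application/json","x-acme-note":"kept"}`)

	var e spec3.Encoding
	if err := json.Unmarshal(in, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.ContentType)               // application/json
	fmt.Println(e.Extensions["x-acme-note"]) // kept

	out, err := json.Marshal(&e)
	if err != nil {
		panic(err)
	}
	// The output carries both the contentType field and the x- extension.
	fmt.Println(string(out))
}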
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Example https://swagger.io/specification/#example-object + +type Example struct { + spec.Refable + ExampleProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON +func (e *Example) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } + b1, err := json.Marshal(e.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(e.ExampleProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(e.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} +func (e *Example) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ExampleProps `json:",inline"` + spec.Extensions + } + x.Ref = e.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExampleProps = e.ExampleProps + return opts.MarshalNext(enc, x) +} + +func (e *Example) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } + if err := json.Unmarshal(data, &e.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &e.ExampleProps); err != nil { + return err + } + if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { + return err + } + return nil +} + +func (e *Example) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExampleProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&e.Ref.Ref, x.Extensions); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExampleProps = x.ExampleProps + + return nil +} + +type ExampleProps struct { + // Summary holds a short description of the example + Summary string `json:"summary,omitempty"` + // Description holds a long description of the example + Description string `json:"description,omitempty"` + // Embedded literal example. + Value interface{} `json:"value,omitempty"` + // A URL that points to the literal example. This provides the capability to reference examples that cannot easily be included in JSON or YAML documents. + ExternalValue string `json:"externalValue,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go new file mode 100644 index 000000000..f0515496e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type ExternalDocumentation struct { + ExternalDocumentationProps + spec.VendorExtensible +} + +type ExternalDocumentationProps struct { + // Description is a short description of the target documentation. CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty"` + // URL is the URL for the target documentation. + URL string `json:"url"` +} + +// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON +func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } + b1, err := json.Marshal(e.ExternalDocumentationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(e.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (e *ExternalDocumentation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ExternalDocumentationProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExternalDocumentationProps = e.ExternalDocumentationProps + return opts.MarshalNext(enc, x) +} + +func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } + if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil { + return err + } + if err := json.Unmarshal(data, &e.VendorExtensible); err != nil { + return err + } + return nil +} + +func (e *ExternalDocumentation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExternalDocumentationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExternalDocumentationProps = x.ExternalDocumentationProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go new file mode 100644 index 000000000..08b6246ce --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -0,0 +1,281 @@ +package spec3 + +import ( + "math/rand" + "strings" + + fuzz "github.com/google/gofuzz" + + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// refChance is the chance that a particular component will use a $ref +// instead of fuzzed. Expressed as a fraction 1/n, currently there is +// a 1/3 chance that a ref will be used. 
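A sketch of how the fuzz functions defined below are typically wired into a round-trip style test with github.com/google/gofuzz; the test name, seed, and tuning values are illustrative, not taken from the repository:

package spec3_test

import (
	"testing"

	fuzz "github.com/google/gofuzz"

	"k8s.io/kube-openapi/pkg/spec3"
)

func TestFuzzedOpenAPIDocument(t *testing.T) {
	f := fuzz.NewWithSeed(1).
		NilChance(0.5).
		NumElements(0, 2).
		Funcs(spec3.OpenAPIV3FuzzFuncs...)

	var doc spec3.OpenAPI
	f.Fuzz(&doc)

	// The custom func for *OpenAPI pins the version string.
	if doc.Version != "3.0.0" {
		t.Fatalf("expected fuzzed version to be pinned to 3.0.0, got %q", doc.Version)
	}
}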
+const refChance = 3 + +const alphaNumChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randAlphanumString() string { + arr := make([]string, rand.Intn(10)+5) + for i := 0; i < len(arr); i++ { + arr[i] = string(alphaNumChars[rand.Intn(len(alphaNumChars))]) + } + return strings.Join(arr, "") +} + +var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ + func(s *string, c fuzz.Continue) { + // All OpenAPI V3 map keys must follow the corresponding + // regex. Note that this restricts the range for all other + // string values as well. + str := randAlphanumString() + *s = str + }, + func(o *OpenAPI, c fuzz.Continue) { + c.FuzzNoCustom(o) + o.Version = "3.0.0" + for i, val := range o.SecurityRequirement { + if val == nil { + o.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + + }, + func(r *interface{}, c fuzz.Continue) { + switch c.Intn(3) { + case 0: + *r = nil + case 1: + n := c.RandString() + "x" + *r = n + case 2: + n := c.Float64() + *r = n + } + }, + func(v **spec.Info, c fuzz.Continue) { + // Info is never nil + *v = &spec.Info{} + c.FuzzNoCustom(*v) + (*v).Title = c.RandString() + "x" + }, + func(v *Paths, c fuzz.Continue) { + c.Fuzz(&v.VendorExtensible) + num := c.Intn(5) + if num > 0 { + v.Paths = make(map[string]*Path) + } + for i := 0; i < num; i++ { + val := Path{} + c.Fuzz(&val) + v.Paths["/"+c.RandString()] = &val + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + switch c.Intn(4) { + case 0: + v.Type = "apiKey" + v.Name = c.RandString() + "x" + switch c.Intn(3) { + case 0: + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + case 1: + v.Type = "http" + case 2: + v.Type = "oauth2" + v.Flows = make(map[string]*OAuthFlow) + flow := OAuthFlow{} + flow.AuthorizationUrl = c.RandString() + "x" + v.Flows["implicit"] = &flow + flow.Scopes = make(map[string]string) + flow.Scopes["foo"] = "bar" + case 3: + v.Type = "openIdConnect" + v.OpenIdConnectUrl = "https://" + c.RandString() + } + v.Scheme = "basic" + }, + func(v *spec.Ref, c fuzz.Continue) { + switch c.Intn(7) { + case 0: + *v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString()) + case 1: + *v = spec.MustCreateRef("#/components/responses/" + randAlphanumString()) + case 2: + *v = spec.MustCreateRef("#/components/headers/" + randAlphanumString()) + case 3: + *v = spec.MustCreateRef("#/components/securitySchemes/" + randAlphanumString()) + case 5: + *v = spec.MustCreateRef("#/components/parameters/" + randAlphanumString()) + case 6: + *v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString()) + } + }, + func(v *Parameter, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ParameterProps) + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + // Header param + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + }, + func(v *RequestBody, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.RequestBodyProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *Header, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.HeaderProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *ResponsesProps, c fuzz.Continue) { + c.Fuzz(&v.Default) + n := c.Intn(5) + for i := 0; i < n; i++ { + r2 := Response{} + c.Fuzz(&r2) + // HTTP Status code in 
100-599 Range + code := c.Intn(500) + 100 + v.StatusCodeResponses = make(map[int]*Response) + v.StatusCodeResponses[code] = &r2 + } + }, + func(v *Response, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ResponseProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + // Do not fuzz null values into the array. + for i, val := range v.SecurityRequirement { + if val == nil { + v.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, + func(v *spec.Extensions, c fuzz.Continue) { + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + if *v == nil { + *v = spec.Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *spec.ExternalDocumentation, c fuzz.Continue) { + c.Fuzz(&v.Description) + v.URL = "https://" + randAlphanumString() + }, + func(v *spec.SchemaURL, c fuzz.Continue) { + *v = spec.SchemaURL("https://" + randAlphanumString()) + }, + func(v *spec.SchemaOrBool, c fuzz.Continue) { + *v = spec.SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &spec.Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v *spec.SchemaOrArray, c fuzz.Continue) { + *v = spec.SchemaOrArray{} + if c.RandBool() { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schema = &schema + } else { + v.Schemas = []spec.Schema{} + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schemas = append(v.Schemas, schema) + } + + } + + }, + func(v *spec.SchemaOrStringArray, c fuzz.Continue) { + if c.RandBool() { + *v = spec.SchemaOrStringArray{} + if c.RandBool() { + c.Fuzz(&v.Property) + } else { + c.Fuzz(&v.Schema) + } + } + }, + func(v *spec.Schema, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Ref) + return + } + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = spec.StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.ExtraProps) + } + + }, +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go new file mode 100644 index 000000000..9ea30628c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Header a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject +// +// Note that this struct is actually a thin wrapper around HeaderProps to make it referable and extensible +type Header struct { + spec.Refable + HeaderProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Header as JSON +func (h *Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(h) + } + b1, err := json.Marshal(h.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (h *Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + HeaderProps headerPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = h.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = headerPropsOmitZero(h.HeaderProps) + return opts.MarshalNext(enc, x) +} + +func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, h) + } + if err := json.Unmarshal(data, &h.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &h.HeaderProps); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + + return nil +} + +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + HeaderProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&h.Ref.Ref, x.Extensions); err != nil { + return err + } + h.Extensions = internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + return nil +} + +// HeaderProps a struct that describes a header object +type HeaderProps struct { + // Description holds a brief description of the parameter + Description string `json:"description,omitempty"` + // Required determines whether this parameter is mandatory + Required bool `json:"required,omitempty"` + // Deprecated declares this operation to be deprecated + Deprecated bool `json:"deprecated,omitempty"` + // AllowEmptyValue sets the ability to pass empty-valued parameters + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` + // Style describes how the parameter value will be serialized depending on the type of the parameter value + Style string `json:"style,omitempty"` + // Explode when true, parameter values of type array or object generate separate parameters for each value of the array or key-value pair of the map + Explode bool `json:"explode,omitempty"` + // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 + AllowReserved bool `json:"allowReserved,omitempty"` + // Schema holds the schema defining the type used for the parameter + Schema *spec.Schema `json:"schema,omitempty"` + // Content holds a map containing the 
representations for the parameter + Content map[string]*MediaType `json:"content,omitempty"` + // Example of the header + Example interface{} `json:"example,omitempty"` + // Examples of the header + Examples map[string]*Example `json:"examples,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type headerPropsOmitZero struct { + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go new file mode 100644 index 000000000..47eef1edb --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -0,0 +1,106 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// MediaType a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject +// +// Note that this struct is actually a thin wrapper around MediaTypeProps to make it referable and extensible +type MediaType struct { + MediaTypeProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON +func (m *MediaType) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(m) + } + b1, err := json.Marshal(m.MediaTypeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(m.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (e *MediaType) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + MediaTypeProps mediaTypePropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.MediaTypeProps = mediaTypePropsOmitZero(e.MediaTypeProps) + return opts.MarshalNext(enc, x) +} + +func (m *MediaType) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, m) + } + if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil { + return err + } + if err := json.Unmarshal(data, &m.VendorExtensible); err != nil { + return err + } + return nil +} + +func (m *MediaType) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + MediaTypeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + m.Extensions = internal.SanitizeExtensions(x.Extensions) + m.MediaTypeProps = x.MediaTypeProps + + return nil +} + +// MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject +type MediaTypeProps struct { + // Schema holds the schema defining the type used for the media type + Schema *spec.Schema `json:"schema,omitempty"` + // Example of the media type + Example interface{} `json:"example,omitempty"` + // Examples of the media type. Each example object should match the media type and specific schema if present + Examples map[string]*Example `json:"examples,omitempty"` + // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded + Encoding map[string]*Encoding `json:"encoding,omitempty"` +} + +type mediaTypePropsOmitZero struct { + Schema *spec.Schema `json:"schema,omitzero"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` + Encoding map[string]*Encoding `json:"encoding,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go new file mode 100644 index 000000000..f1e102547 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -0,0 +1,124 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject +// +// Note that this struct is actually a thin wrapper around OperationProps to make it referable and extensible +type Operation struct { + OperationProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Operation as JSON +func (o *Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (o *Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + spec.Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, o) + } + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + OperationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = x.OperationProps + return nil +} + +// OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject +type OperationProps struct { + // Tags holds a list of tags for API documentation control + Tags []string `json:"tags,omitempty"` + // Summary holds a short summary of what the operation does + Summary string `json:"summary,omitempty"` + // Description holds a verbose explanation of the operation behavior + Description string `json:"description,omitempty"` + // ExternalDocs holds additional external documentation for this operation + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + // OperationId holds a unique string used to identify the operation + OperationId string `json:"operationId,omitempty"` + // Parameters a list of parameters that are applicable for this operation + Parameters []*Parameter `json:"parameters,omitempty"` + // RequestBody 
holds the request body applicable for this operation + RequestBody *RequestBody `json:"requestBody,omitempty"` + // Responses holds the list of possible responses as they are returned from executing this operation + Responses *Responses `json:"responses,omitempty"` + // Deprecated declares this operation to be deprecated + Deprecated bool `json:"deprecated,omitempty"` + // SecurityRequirement holds a declaration of which security mechanisms can be used for this operation + SecurityRequirement []map[string][]string `json:"security,omitempty"` + // Servers contains an alternative server array to service this operation + Servers []*Server `json:"servers,omitempty"` +} + +type operationPropsOmitZero struct { + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + Description string `json:"description,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + OperationId string `json:"operationId,omitempty"` + Parameters []*Parameter `json:"parameters,omitempty"` + RequestBody *RequestBody `json:"requestBody,omitzero"` + Responses *Responses `json:"responses,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + Servers []*Server `json:"servers,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go new file mode 100644 index 000000000..ada7edb63 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -0,0 +1,147 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Parameter a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject +// +// Note that this struct is actually a thin wrapper around ParameterProps to make it referable and extensible +type Parameter struct { + spec.Refable + ParameterProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON +func (p *Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.ParameterProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (p *Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ParameterProps parameterPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParameterProps = parameterPropsOmitZero(p.ParameterProps) + return opts.MarshalNext(enc, x) +} + +func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } + + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.ParameterProps); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + + return nil +} + +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ParameterProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParameterProps = x.ParameterProps + return nil +} + +// ParameterProps a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject +type ParameterProps struct { + // Name holds the name of the parameter + Name string `json:"name,omitempty"` + // In holds the location of the parameter + In string `json:"in,omitempty"` + // Description holds a brief description of the parameter + Description string `json:"description,omitempty"` + // Required determines whether this parameter is mandatory + Required bool `json:"required,omitempty"` + // Deprecated declares this operation to be deprecated + Deprecated bool `json:"deprecated,omitempty"` + // AllowEmptyValue sets the ability to pass empty-valued parameters + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` + // Style describes how the parameter value will be serialized depending on the type of the parameter value + Style string `json:"style,omitempty"` + // Explode when true, parameter values of type array or object generate separate parameters for each value of the array or key-value pair of the map + Explode bool 
`json:"explode,omitempty"` + // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 + AllowReserved bool `json:"allowReserved,omitempty"` + // Schema holds the schema defining the type used for the parameter + Schema *spec.Schema `json:"schema,omitempty"` + // Content holds a map containing the representations for the parameter + Content map[string]*MediaType `json:"content,omitempty"` + // Example of the parameter's potential value + Example interface{} `json:"example,omitempty"` + // Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding + Examples map[string]*Example `json:"examples,omitempty"` +} + +type parameterPropsOmitZero struct { + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go new file mode 100644 index 000000000..16fbbb4dd --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -0,0 +1,263 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject +type Paths struct { + Paths map[string]*Path + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Paths as JSON +func (p *Paths) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]*Path) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (p *Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]*Path) + } + var pi *Path + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *p = Paths{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k := tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi := Path{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]*Path) + } + p.Paths[k] = &pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + +// Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject +// +// Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible +type Path struct { + spec.Refable + PathProps + spec.VendorExtensible +} + 
+// MarshalJSON is a custom marshal function that knows how to encode Path as JSON +func (p *Path) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.PathProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (p *Path) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + PathProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathProps = p.PathProps + return opts.MarshalNext(enc, x) +} + +func (p *Path) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.PathProps); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return nil +} + +func (p *Path) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + PathProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathProps = x.PathProps + + return nil +} + +// PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject +type PathProps struct { + // Summary holds a summary for all operations in this path + Summary string `json:"summary,omitempty"` + // Description holds a description for all operations in this path + Description string `json:"description,omitempty"` + // Get defines GET operation + Get *Operation `json:"get,omitempty"` + // Put defines PUT operation + Put *Operation `json:"put,omitempty"` + // Post defines POST operation + Post *Operation `json:"post,omitempty"` + // Delete defines DELETE operation + Delete *Operation `json:"delete,omitempty"` + // Options defines OPTIONS operation + Options *Operation `json:"options,omitempty"` + // Head defines HEAD operation + Head *Operation `json:"head,omitempty"` + // Patch defines PATCH operation + Patch *Operation `json:"patch,omitempty"` + // Trace defines TRACE operation + Trace *Operation `json:"trace,omitempty"` + // Servers is an alternative server array to service all operations in this path + Servers []*Server `json:"servers,omitempty"` + // Parameters a list of parameters that are applicable for this operation + Parameters []*Parameter `json:"parameters,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go new file mode 100644 index 000000000..6f8607e40 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -0,0 +1,115 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject +// +// Note that this struct is actually a thin wrapper around RequestBodyProps to make it referable and extensible +type RequestBody struct { + spec.Refable + RequestBodyProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON +func (r *RequestBody) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } + b1, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.RequestBodyProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (r *RequestBody) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + RequestBodyProps requestBodyPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.RequestBodyProps = requestBodyPropsOmitZero(r.RequestBodyProps) + return opts.MarshalNext(enc, x) +} + +func (r *RequestBody) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &r.RequestBodyProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + return nil +} + +// RequestBodyProps describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject +type RequestBodyProps struct { + // Description holds a brief description of the request body + Description string `json:"description,omitempty"` + // Content is the content of the request body. 
The key is a media type or media type range and the value describes it + Content map[string]*MediaType `json:"content,omitempty"` + // Required determines if the request body is required in the request + Required bool `json:"required,omitempty"` +} + +type requestBodyPropsOmitZero struct { + Description string `json:"description,omitempty"` + Content map[string]*MediaType `json:"content,omitempty"` + Required bool `json:"required,omitzero"` +} + +func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + RequestBodyProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.RequestBodyProps = x.RequestBodyProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go new file mode 100644 index 000000000..73e241fdc --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -0,0 +1,362 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// Responses holds the list of possible responses as they are returned from executing this operation +// +// Note that this struct is actually a thin wrapper around ResponsesProps to make it referable and extensible +type Responses struct { + ResponsesProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON +func (r *Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitzero"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + +func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + if err := json.Unmarshal(data, 
&r.VendorExtensible); err != nil { + return err + } + return nil +} + +// ResponsesProps holds the list of possible responses as they are returned from executing this operation +type ResponsesProps struct { + // Default holds the documentation of responses other than the ones declared for specific HTTP response codes. Use this field to cover undeclared responses + Default *Response `json:"-"` + // StatusCodeResponses holds a map of any HTTP status code to the response definition + StatusCodeResponses map[int]*Response `json:"-"` +} + +// MarshalJSON is a custom marshal function that knows how to encode ResponsesProps as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]*Response{} + if r.Default != nil { + toser["default"] = r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + if v, ok := res["default"]; ok { + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value + delete(res, "default") + } + for k, v := range res { + // Take all integral keys + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]*Response{} + } + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = &value + } + } + return nil +} + +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *r = Responses{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + r.ResponsesProps.Default = &resp + default: + if nk, err := strconv.Atoi(k); err == nil { + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]*Response{} + } + r.StatusCodeResponses[nk] = &resp + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + +// Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject +// +// Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible +type Response struct { + spec.Refable + ResponseProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Response as JSON +func (r *Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } + b1, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.ResponseProps) + if 
err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + ResponseProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = r.ResponseProps + return opts.MarshalNext(enc, x) +} + +func (r *Response) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + return nil +} + +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ResponseProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps + return nil +} + +// ResponseProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject +type ResponseProps struct { + // Description holds a short description of the response + Description string `json:"description,omitempty"` + // Headers holds a maps of a headers name to its definition + Headers map[string]*Header `json:"headers,omitempty"` + // Content holds a map containing descriptions of potential response payloads + Content map[string]*MediaType `json:"content,omitempty"` + // Links is a map of operations links that can be followed from the response + Links map[string]*Link `json:"links,omitempty"` +} + +// Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object +type Link struct { + spec.Refable + LinkProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode Link as JSON +func (r *Link) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } + b1, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.LinkProps) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (r *Link) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + LinkProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.LinkProps = r.LinkProps + return opts.MarshalNext(enc, x) +} + +func (r *Link) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &r.LinkProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + + return nil +} + +func (l *Link) 
UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + LinkProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&l.Ref.Ref, x.Extensions); err != nil { + return err + } + l.Extensions = internal.SanitizeExtensions(x.Extensions) + l.LinkProps = x.LinkProps + return nil +} + +// LinkProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject +type LinkProps struct { + // OperationId is the name of an existing, resolvable OAS operation + OperationId string `json:"operationId,omitempty"` + // Parameters is a map representing parameters to pass to an operation as specified with operationId or identified via operationRef + Parameters map[string]interface{} `json:"parameters,omitempty"` + // Description holds a description of the link + Description string `json:"description,omitempty"` + // RequestBody is a literal value or expresion to use as a request body when calling the target operation + RequestBody interface{} `json:"requestBody,omitempty"` + // Server holds a server object used by the target operation + Server *Server `json:"server,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go new file mode 100644 index 000000000..dd1e98ed8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -0,0 +1,135 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject +type SecurityScheme struct { + spec.Refable + SecuritySchemeProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON +func (s *SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } + b1, err := json.Marshal(s.SecuritySchemeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + b3, err := json.Marshal(s.Refable) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (s *SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + SecuritySchemeProps `json:",inline"` + spec.Extensions + } + x.Ref = s.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &s.Refable) +} + +// SecuritySchemeProps defines a security scheme that can be used by the operations +type SecuritySchemeProps struct { + // Type of the security scheme + Type string `json:"type,omitempty"` + // Description holds a short description for security scheme + Description string `json:"description,omitempty"` + // Name holds the name of the header, query or cookie parameter to be used + Name string `json:"name,omitempty"` + // In holds the location of the API key + In string `json:"in,omitempty"` + // Scheme holds the name of the HTTP Authorization scheme to be used in the Authorization header + Scheme string `json:"scheme,omitempty"` + // BearerFormat holds a hint to the client to identify how the bearer token is formatted + BearerFormat string `json:"bearerFormat,omitempty"` + // Flows contains configuration information for the flow types supported. + Flows map[string]*OAuthFlow `json:"flows,omitempty"` + // OpenIdConnectUrl holds an url to discover OAuth2 configuration values from + OpenIdConnectUrl string `json:"openIdConnectUrl,omitempty"` +} + +// OAuthFlow contains configuration information for the flow types supported. 
+type OAuthFlow struct { + OAuthFlowProps + spec.VendorExtensible +} + +// MarshalJSON is a custom marshal function that knows how to encode OAuthFlow as JSON +func (o *OAuthFlow) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OAuthFlowProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *OAuthFlow) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OAuthFlowProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// OAuthFlowProps holds configuration details for a supported OAuth Flow +type OAuthFlowProps struct { + // AuthorizationUrl hold the authorization URL to be used for this flow + AuthorizationUrl string `json:"authorizationUrl,omitempty"` + // TokenUrl holds the token URL to be used for this flow + TokenUrl string `json:"tokenUrl,omitempty"` + // RefreshUrl holds the URL to be used for obtaining refresh tokens + RefreshUrl string `json:"refreshUrl,omitempty"` + // Scopes holds the available scopes for the OAuth2 security scheme + Scopes map[string]string `json:"scopes,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go new file mode 100644 index 000000000..654a42c06 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -0,0 +1,161 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type Server struct { + ServerProps + spec.VendorExtensible +} + +type ServerProps struct { + // Description is a short description of the target documentation. CommonMark syntax MAY be used for rich text representation. + Description string `json:"description,omitempty"` + // URL is the URL for the target documentation. + URL string `json:"url"` + // Variables contains a map between a variable name and its value. 
The value is used for substitution in the server's URL templeate + Variables map[string]*ServerVariable `json:"variables,omitempty"` +} + +// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON +func (s *Server) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } + b1, err := json.Marshal(s.ServerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (s *Server) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerProps = s.ServerProps + return opts.MarshalNext(enc, x) +} + +func (s *Server) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } + + if err := json.Unmarshal(data, &s.ServerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { + return err + } + return nil +} + +func (s *Server) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerProps = x.ServerProps + + return nil +} + +type ServerVariable struct { + ServerVariableProps + spec.VendorExtensible +} + +type ServerVariableProps struct { + // Enum is an enumeration of string values to be used if the substitution options are from a limited set + Enum []string `json:"enum,omitempty"` + // Default is the default value to use for substitution, which SHALL be sent if an alternate value is not supplied + Default string `json:"default"` + // Description is a description for the server variable + Description string `json:"description,omitempty"` +} + +// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON +func (s *ServerVariable) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } + b1, err := json.Marshal(s.ServerVariableProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (s *ServerVariable) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerVariableProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerVariableProps = s.ServerVariableProps + return opts.MarshalNext(enc, x) +} + +func (s *ServerVariable) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } + if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil { + return err + } + if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { + return err + } + return nil +} + +func (s *ServerVariable) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerVariableProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerVariableProps = x.ServerVariableProps + + return nil +} diff --git 
a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go new file mode 100644 index 000000000..5db819c7f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -0,0 +1,75 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec3 + +import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// OpenAPI is an object that describes an API and conforms to the OpenAPI Specification. +type OpenAPI struct { + // Version represents the semantic version number of the OpenAPI Specification that this document uses + Version string `json:"openapi"` + // Info provides metadata about the API + Info *spec.Info `json:"info"` + // Paths holds the available target and operations for the API + Paths *Paths `json:"paths,omitempty"` + // Servers is an array of Server objects which provide connectivity information to a target server + Servers []*Server `json:"servers,omitempty"` + // Components hold various schemas for the specification + Components *Components `json:"components,omitempty"` + // SecurityRequirement holds a declaration of which security mechanisms can be used across the API + SecurityRequirement []map[string][]string `json:"security,omitempty"` + // ExternalDocs holds additional external documentation + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +func (o *OpenAPI) UnmarshalJSON(data []byte) error { + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, &p) + } + return json.Unmarshal(data, &p) +} + +func (o *OpenAPI) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + return json.Marshal(&p) +} + +func (o *OpenAPI) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type OpenAPIOmitZero struct { + Version string `json:"openapi"` + Info *spec.Info `json:"info"` + Paths *Paths `json:"paths,omitzero"` + Servers []*Server `json:"servers,omitempty"` + Components *Components `json:"components,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + } + x := (*OpenAPIOmitZero)(o) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 4abcf9b82..5789e67ab 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - openapi_v2 "github.com/googleapis/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" "gopkg.in/yaml.v2" ) diff --git 
a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go new file mode 100644 index 000000000..d9f2896e3 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -0,0 +1,324 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "reflect" + "strings" + + openapi_v3 "github.com/google/gnostic-models/openapiv3" + "gopkg.in/yaml.v3" +) + +// Temporary parse implementation to be used until gnostic->kube-openapi conversion +// is possible. +func NewOpenAPIV3Data(doc *openapi_v3.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + schemas := doc.GetComponents().GetSchemas() + if schemas == nil { + return &definitions, nil + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. + for _, namedSchema := range schemas.GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exists. + for _, namedSchema := range schemas.GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + val := namedSchema.GetValue() + + if val == nil { + continue + } + + if schema, err := definitions.ParseV3SchemaOrReference(namedSchema.GetValue(), &path); err != nil { + return nil, err + } else if schema != nil { + // Schema may be nil if we hit incompleteness in the conversion, + // but not a fatal error + definitions.models[namedSchema.GetName()] = schema + } + } + + return &definitions, nil +} + +func (d *Definitions) ParseV3SchemaReference(s *openapi_v3.Reference, path *Path) (Schema, error) { + base := &BaseSchema{ + Description: s.Description, + } + + if !strings.HasPrefix(s.GetXRef(), "#/components/schemas") { + // Only resolve references to components/schemas. We may add support + // later for other in-spec paths, but otherwise treat unrecognized + // refs as arbitrary/unknown values. + return &Arbitrary{ + BaseSchema: *base, + }, nil + } + + reference := strings.TrimPrefix(s.GetXRef(), "#/components/schemas/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + + return &Ref{ + BaseSchema: BaseSchema{ + Description: s.Description, + }, + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) ParseV3SchemaOrReference(s *openapi_v3.SchemaOrReference, path *Path) (Schema, error) { + var schema Schema + var err error + + switch v := s.GetOneof().(type) { + case *openapi_v3.SchemaOrReference_Reference: + // Any references stored in #!/components/... are bound to refer + // to external documents. This API does not support such a + // feature. 
+ // + // In the weird case that this is a reference to a schema that is + // not external, we attempt to parse anyway + schema, err = d.ParseV3SchemaReference(v.Reference, path) + case *openapi_v3.SchemaOrReference_Schema: + schema, err = d.ParseSchemaV3(v.Schema, path) + default: + panic("unexpected type") + } + + return schema, err +} + +// ParseSchema creates a walkable Schema from an openapi v3 schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchemaV3(s *openapi_v3.Schema, path *Path) (Schema, error) { + switch s.GetType() { + case object: + for _, extension := range s.GetSpecificationExtension() { + if extension.Name == "x-kubernetes-group-version-kind" { + // Objects with x-kubernetes-group-version-kind are always top + // level types. + return d.parseV3Kind(s, path) + } + } + + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return d.parseV3Kind(s, path) + } + return d.parseV3Map(s, path) + case array: + return d.parseV3Array(s, path) + case String, Number, Integer, Boolean: + return d.parseV3Primitive(s, path) + default: + return d.parseV3Arbitrary(s, path) + } +} + +func (d *Definitions) parseV3Kind(s *openapi_v3.Schema, path *Path) (Schema, error) { + if s.GetType() != object { + return nil, newSchemaError(path, "invalid object type") + } else if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + fieldOrder := []string{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + name := namedSchema.GetName() + path := path.FieldPath(name) + fields[name], err = d.ParseV3SchemaOrReference(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + fieldOrder = append(fieldOrder, name) + } + + base, err := d.parseV3BaseSchema(s, path) + if err != nil { + return nil, err + } + + return &Kind{ + BaseSchema: *base, + RequiredFields: s.GetRequired(), + Fields: fields, + FieldOrder: fieldOrder, + }, nil +} + +func (d *Definitions) parseV3Arbitrary(s *openapi_v3.Schema, path *Path) (Schema, error) { + base, err := d.parseV3BaseSchema(s, path) + if err != nil { + return nil, err + } + return &Arbitrary{ + BaseSchema: *base, + }, nil +} + +func (d *Definitions) parseV3Primitive(s *openapi_v3.Schema, path *Path) (Schema, error) { + switch s.GetType() { + case String: // do nothing + case Number: // do nothing + case Integer: // do nothing + case Boolean: // do nothing + default: + // Unsupported primitive type. Treat as arbitrary type + return d.parseV3Arbitrary(s, path) + } + + base, err := d.parseV3BaseSchema(s, path) + if err != nil { + return nil, err + } + + return &Primitive{ + BaseSchema: *base, + Type: s.GetType(), + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseV3Array(s *openapi_v3.Schema, path *Path) (Schema, error) { + if s.GetType() != array { + return nil, newSchemaError(path, `array should have type "array"`) + } else if len(s.GetItems().GetSchemaOrReference()) != 1 { + // This array can have multiple types in it (or no types at all) + // This is not supported by this conversion. 
+ // Just return an arbitrary type + return d.parseV3Arbitrary(s, path) + } + + sub, err := d.ParseV3SchemaOrReference(s.GetItems().GetSchemaOrReference()[0], path) + if err != nil { + return nil, err + } + + base, err := d.parseV3BaseSchema(s, path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: *base, + SubType: sub, + }, nil +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseV3Map(s *openapi_v3.Schema, path *Path) (Schema, error) { + if s.GetType() != object { + return nil, newSchemaError(path, "invalid object type") + } + var sub Schema + + switch p := s.GetAdditionalProperties().GetOneof().(type) { + case *openapi_v3.AdditionalPropertiesItem_Boolean: + // What does this boolean even mean? + base, err := d.parseV3BaseSchema(s, path) + if err != nil { + return nil, err + } + sub = &Arbitrary{ + BaseSchema: *base, + } + case *openapi_v3.AdditionalPropertiesItem_SchemaOrReference: + if schema, err := d.ParseV3SchemaOrReference(p.SchemaOrReference, path); err != nil { + return nil, err + } else { + sub = schema + } + case nil: + // no subtype? + sub = &Arbitrary{} + default: + panic("unrecognized type " + reflect.TypeOf(p).Name()) + } + + base, err := d.parseV3BaseSchema(s, path) + if err != nil { + return nil, err + } + return &Map{ + BaseSchema: *base, + SubType: sub, + }, nil +} + +func parseV3Interface(def *yaml.Node) (interface{}, error) { + if def == nil { + return nil, nil + } + var i interface{} + if err := def.Decode(&i); err != nil { + return nil, err + } + return i, nil +} + +func (d *Definitions) parseV3BaseSchema(s *openapi_v3.Schema, path *Path) (*BaseSchema, error) { + if s == nil { + return nil, fmt.Errorf("cannot initialize BaseSchema from nil") + } + + def, err := parseV3Interface(s.GetDefault().ToRawInfo()) + if err != nil { + return nil, err + } + + return &BaseSchema{ + Description: s.GetDescription(), + Default: def, + Extensions: SpecificationExtensionToMap(s.GetSpecificationExtension()), + Path: *path, + }, nil +} + +func SpecificationExtensionToMap(e []*openapi_v3.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore b/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore new file mode 100644 index 000000000..dd91ed6a0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE b/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go new file mode 100644 index 000000000..f285970aa --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ContactInfo contact information for the exposed API. +// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go new file mode 100644 index 000000000..88add91b2 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go new file mode 100644 index 000000000..6a77f2ac8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go @@ -0,0 +1,1517 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec + +import ( + "errors" + "strconv" + + "github.com/go-openapi/jsonreference" + openapi_v2 "github.com/google/gnostic-models/openapiv2" +) + +// Interfaces +type GnosticCommonValidations interface { + GetMaximum() float64 + GetExclusiveMaximum() bool + GetMinimum() float64 + GetExclusiveMinimum() bool + GetMaxLength() int64 + GetMinLength() int64 + GetPattern() string + GetMaxItems() int64 + GetMinItems() int64 + GetUniqueItems() bool + GetMultipleOf() float64 + GetEnum() []*openapi_v2.Any +} + +func (k *CommonValidations) FromGnostic(g GnosticCommonValidations) error { + if g == nil { + return nil + } + + max := g.GetMaximum() + if max != 0 { + k.Maximum = &max + } + + k.ExclusiveMaximum = g.GetExclusiveMaximum() + + min := g.GetMinimum() + if min != 0 { + k.Minimum = &min + } + + k.ExclusiveMinimum = g.GetExclusiveMinimum() + + maxLen := g.GetMaxLength() + if maxLen != 0 { + k.MaxLength = &maxLen + } + + minLen := g.GetMinLength() + if minLen != 0 { + k.MinLength = &minLen + } + + k.Pattern = g.GetPattern() + + maxItems := g.GetMaxItems() + if maxItems != 0 { + k.MaxItems = &maxItems + } + + minItems := g.GetMinItems() + if minItems != 0 { + k.MinItems = &minItems + } + + k.UniqueItems = g.GetUniqueItems() + + multOf := g.GetMultipleOf() + if multOf != 0 { + k.MultipleOf = &multOf + } + + enums := g.GetEnum() + + if enums != nil { + k.Enum = make([]interface{}, len(enums)) + for i, v := range enums { + if v == nil { + continue + } + + var convert interface{} + if err := v.ToRawInfo().Decode(&convert); err != nil { + return err + } else { + k.Enum[i] = convert + } + } + } + + return nil +} + +type GnosticSimpleSchema interface { + GetType() string + GetFormat() string + GetItems() *openapi_v2.PrimitivesItems + GetCollectionFormat() string + GetDefault() *openapi_v2.Any +} + +func (k *SimpleSchema) FromGnostic(g GnosticSimpleSchema) error { + if g == nil { + return nil + } + + k.Type = g.GetType() + k.Format = g.GetFormat() + k.CollectionFormat = g.GetCollectionFormat() + + items := g.GetItems() + if items != nil { + k.Items = &Items{} + if err := k.Items.FromGnostic(items); err != nil { + return err + } + } + + def := g.GetDefault() + if def != nil { + var convert interface{} + if err := def.ToRawInfo().Decode(&convert); err != nil { + return err + } else { + k.Default = convert + } + } + + return nil +} + +func (k *Items) FromGnostic(g *openapi_v2.PrimitivesItems) error { + if g == nil { + return nil + } + + if err := k.SimpleSchema.FromGnostic(g); err != nil { + return err + } + + if err := k.CommonValidations.FromGnostic(g); err != nil { + return err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return err + } + + return nil +} + +func (k *VendorExtensible) FromGnostic(g []*openapi_v2.NamedAny) error { + if len(g) == 0 { + return nil + } + + k.Extensions = make(Extensions, len(g)) + for _, v := range g { + if v == nil { + continue + } + + if v.Value == nil { + k.Extensions[v.Name] = nil + continue + } + + var iface interface{} + if err := v.Value.ToRawInfo().Decode(&iface); err != nil { + return err + } else { + k.Extensions[v.Name] = iface + } + } + return nil +} + +func (k *Refable) FromGnostic(g string) error { + return k.Ref.FromGnostic(g) +} + +func (k *Ref) FromGnostic(g string) error { + if g == "" { + return nil + } + + ref, err := jsonreference.New(g) + if err != nil { + return err + } + + *k = Ref{ + Ref: ref, + } + + return nil +} + +// Converts a gnostic v2 Document to a kube-openapi Swagger Document +// +// Caveats: 
+// +// - gnostic v2 documents treats zero as unspecified for numerical fields of +// CommonValidations fields such as Maximum, Minimum, MaximumItems, etc. +// There will always be data loss if one of the values of these fields is set to zero. +// +// Returns: +// +// - `ok`: `false` if a value was present in the gnostic document which cannot be +// roundtripped into kube-openapi types. In these instances, `ok` is set to +// `false` and the value is skipped. +// +// - `err`: an unexpected error occurred in the conversion from the gnostic type +// to kube-openapi type. +func (k *Swagger) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { + ok = true + if g == nil { + return true, nil + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.SwaggerProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *SwaggerProps) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + // openapi_v2.Document does not support "ID" field, so it will not be + // included + k.Consumes = g.Consumes + k.Produces = g.Produces + k.Schemes = g.Schemes + k.Swagger = g.Swagger + + if g.Info != nil { + k.Info = &Info{} + if nok, err := k.Info.FromGnostic(g.Info); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.Host = g.Host + k.BasePath = g.BasePath + + if g.Paths != nil { + k.Paths = &Paths{} + if nok, err := k.Paths.FromGnostic(g.Paths); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Definitions != nil { + k.Definitions = make(Definitions, len(g.Definitions.AdditionalProperties)) + for _, v := range g.Definitions.AdditionalProperties { + if v == nil { + continue + } + converted := Schema{} + if nok, err := converted.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + k.Definitions[v.Name] = converted + + } + } + + if g.Parameters != nil { + k.Parameters = make( + map[string]Parameter, + len(g.Parameters.AdditionalProperties)) + for _, v := range g.Parameters.AdditionalProperties { + if v == nil { + continue + } + p := Parameter{} + if nok, err := p.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Parameters[v.Name] = p + } + } + + if g.Responses != nil { + k.Responses = make( + map[string]Response, + len(g.Responses.AdditionalProperties)) + + for _, v := range g.Responses.AdditionalProperties { + if v == nil { + continue + } + p := Response{} + if nok, err := p.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Responses[v.Name] = p + } + } + + if g.SecurityDefinitions != nil { + k.SecurityDefinitions = make(SecurityDefinitions) + if err := k.SecurityDefinitions.FromGnostic(g.SecurityDefinitions); err != nil { + return false, err + } + } + + if g.Security != nil { + k.Security = make([]map[string][]string, len(g.Security)) + for i, v := range g.Security { + if v == nil || v.AdditionalProperties == nil { + continue + } + + k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) + converted := k.Security[i] + for _, p := range v.AdditionalProperties { + if p == nil { + continue + } + if p.Value != nil { + converted[p.Name] = p.Value.Value + } else { + converted[p.Name] = nil + } + } + } + } + + if g.Tags != nil { + k.Tags = make([]Tag, len(g.Tags)) + for i, v := range g.Tags { + if v == nil { + continue 
+ } else if nok, err := k.Tags[i].FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// Info + +func (k *Info) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { + ok = true + if g == nil { + return true, nil + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.InfoProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *InfoProps) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Description = g.Description + k.Title = g.Title + k.TermsOfService = g.TermsOfService + + if g.Contact != nil { + k.Contact = &ContactInfo{} + + if nok, err := k.Contact.FromGnostic(g.Contact); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.License != nil { + k.License = &License{} + if nok, err := k.License.FromGnostic(g.License); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.Version = g.Version + return ok, nil +} + +func (k *License) FromGnostic(g *openapi_v2.License) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Name = g.Name + k.URL = g.Url + + // License does not embed to VendorExtensible! + // data loss from g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} + +func (k *ContactInfo) FromGnostic(g *openapi_v2.Contact) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Name = g.Name + k.URL = g.Url + k.Email = g.Email + + // ContactInfo does not embed to VendorExtensible! 
+ // data loss from g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} + +// Paths + +func (k *Paths) FromGnostic(g *openapi_v2.Paths) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if g.Path != nil { + k.Paths = make(map[string]PathItem, len(g.Path)) + for _, v := range g.Path { + if v == nil { + continue + } + + converted := PathItem{} + if nok, err := converted.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Paths[v.Name] = converted + } + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *PathItem) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + if nok, err := k.PathItemProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.Refable.FromGnostic(g.XRef); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *PathItemProps) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + if g.Get != nil { + k.Get = &Operation{} + if nok, err := k.Get.FromGnostic(g.Get); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Put != nil { + k.Put = &Operation{} + if nok, err := k.Put.FromGnostic(g.Put); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Post != nil { + k.Post = &Operation{} + if nok, err := k.Post.FromGnostic(g.Post); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Delete != nil { + k.Delete = &Operation{} + if nok, err := k.Delete.FromGnostic(g.Delete); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Options != nil { + k.Options = &Operation{} + if nok, err := k.Options.FromGnostic(g.Options); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Head != nil { + k.Head = &Operation{} + if nok, err := k.Head.FromGnostic(g.Head); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Patch != nil { + k.Patch = &Operation{} + if nok, err := k.Patch.FromGnostic(g.Patch); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Parameters != nil { + k.Parameters = make([]Parameter, len(g.Parameters)) + for i, v := range g.Parameters { + if v == nil { + continue + } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +func (k *Operation) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.OperationProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *OperationProps) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Description = g.Description + k.Consumes = g.Consumes + k.Produces = g.Produces + k.Schemes = g.Schemes + k.Tags = g.Tags + k.Summary = g.Summary + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := 
k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.ID = g.OperationId + k.Deprecated = g.Deprecated + + if g.Security != nil { + k.Security = make([]map[string][]string, len(g.Security)) + for i, v := range g.Security { + if v == nil || v.AdditionalProperties == nil { + continue + } + + k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) + converted := k.Security[i] + for _, p := range v.AdditionalProperties { + if p == nil { + continue + } + + if p.Value != nil { + converted[p.Name] = p.Value.Value + } else { + converted[p.Name] = nil + } + } + } + } + + if g.Parameters != nil { + k.Parameters = make([]Parameter, len(g.Parameters)) + for i, v := range g.Parameters { + if v == nil { + continue + } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + if g.Responses != nil { + k.Responses = &Responses{} + if nok, err := k.Responses.FromGnostic(g.Responses); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// Responses + +func (k *Responses) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.ResponsesProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *ResponsesProps) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { + if g == nil { + return true, nil + } else if g.ResponseCode == nil { + return ok, nil + } + + ok = true + for _, v := range g.ResponseCode { + if v == nil { + continue + } + if v.Name == "default" { + k.Default = &Response{} + if nok, err := k.Default.FromGnosticResponseValue(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + } else if nk, err := strconv.Atoi(v.Name); err != nil { + // This should actually never fail, unless gnostic struct was + // manually/purposefully tampered with at runtime. 
+ // Gnostic's ParseDocument validates that all StatusCodeResponses + // keys adhere to the following regex ^([0-9]{3})$|^(default)$ + ok = false + } else { + if k.StatusCodeResponses == nil { + k.StatusCodeResponses = map[int]Response{} + } + + res := Response{} + if nok, err := res.FromGnosticResponseValue(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + k.StatusCodeResponses[nk] = res + } + } + + return ok, nil +} + +func (k *Response) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + // Refable case handled in FromGnosticResponseValue + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.ResponseProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *Response) FromGnosticResponseValue(g *openapi_v2.ResponseValue) (ok bool, err error) { + ok = true + if ref := g.GetJsonReference(); ref != nil { + k.Description = ref.Description + + if err := k.Refable.FromGnostic(ref.XRef); err != nil { + return false, err + } + } else if nok, err := k.FromGnostic(g.GetResponse()); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *ResponseProps) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + + if g.Schema != nil { + k.Schema = &Schema{} + if nok, err := k.Schema.FromGnosticSchemaItem(g.Schema); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Headers != nil { + k.Headers = make(map[string]Header, len(g.Headers.AdditionalProperties)) + for _, v := range g.Headers.AdditionalProperties { + if v == nil { + continue + } + + converted := Header{} + if err := converted.FromGnostic(v.GetValue()); err != nil { + return false, err + } + + k.Headers[v.Name] = converted + } + } + + if g.Examples != nil { + k.Examples = make(map[string]interface{}, len(g.Examples.AdditionalProperties)) + for _, v := range g.Examples.AdditionalProperties { + if v == nil { + continue + } else if v.Value == nil { + k.Examples[v.Name] = nil + continue + } + + var iface interface{} + if err := v.Value.ToRawInfo().Decode(&iface); err != nil { + return false, err + } else { + k.Examples[v.Name] = iface + } + } + } + + return ok, nil +} + +// Header + +func (k *Header) FromGnostic(g *openapi_v2.Header) (err error) { + if g == nil { + return nil + } + + if err := k.CommonValidations.FromGnostic(g); err != nil { + return err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return err + } + + if err := k.SimpleSchema.FromGnostic(g); err != nil { + return err + } + + if err := k.HeaderProps.FromGnostic(g); err != nil { + return err + } + + return nil +} + +func (k *HeaderProps) FromGnostic(g *openapi_v2.Header) error { + if g == nil { + return nil + } + + // All other fields of openapi_v2.Header are handled by + // the embeded fields, commonvalidations, etc. 
+ k.Description = g.Description + return nil +} + +// Parameters + +func (k *Parameter) FromGnostic(g *openapi_v2.Parameter) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + switch p := g.Oneof.(type) { + case *openapi_v2.Parameter_BodyParameter: + if nok, err := k.ParamProps.FromGnostic(p.BodyParameter); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.VendorExtensible.FromGnostic(p.BodyParameter.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.Parameter_NonBodyParameter: + switch nb := g.GetNonBodyParameter().Oneof.(type) { + case *openapi_v2.NonBodyParameter_HeaderParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.HeaderParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_FormDataParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.FormDataParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_QueryParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.QueryParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_PathParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.PathParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + default: + return false, errors.New("unrecognized nonbody type for Parameter") + } + default: + return false, errors.New("unrecognized type for Parameter") + } +} + +type GnosticCommonParamProps interface { + GetName() string + GetRequired() bool + GetIn() string + GetDescription() string +} + +type GnosticCommonParamPropsBodyParameter interface { + GetSchema() *openapi_v2.Schema +} + +type GnosticCommonParamPropsFormData interface { + GetAllowEmptyValue() bool +} + +func (k *ParamProps) FromGnostic(g GnosticCommonParamProps) (ok bool, err error) { + ok = true + k.Description = g.GetDescription() + 
k.In = g.GetIn() + k.Name = g.GetName() + k.Required = g.GetRequired() + + if formDataParameter, success := g.(GnosticCommonParamPropsFormData); success { + k.AllowEmptyValue = formDataParameter.GetAllowEmptyValue() + } + + if bodyParameter, success := g.(GnosticCommonParamPropsBodyParameter); success { + if bodyParameter.GetSchema() != nil { + k.Schema = &Schema{} + if nok, err := k.Schema.FromGnostic(bodyParameter.GetSchema()); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +// PB types use a different structure than we do for "refable". For PB, there is +// a wrappign oneof type that could be a ref or the type +func (k *Parameter) FromGnosticParametersItem(g *openapi_v2.ParametersItem) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + if ref := g.GetJsonReference(); ref != nil { + k.Description = ref.Description + + if err := k.Refable.FromGnostic(ref.XRef); err != nil { + return false, err + } + } else if nok, err := k.FromGnostic(g.GetParameter()); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +// Schema + +func (k *Schema) FromGnostic(g *openapi_v2.Schema) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + // SwaggerSchemaProps + k.Discriminator = g.Discriminator + k.ReadOnly = g.ReadOnly + k.Description = g.Description + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Example != nil { + if err := g.Example.ToRawInfo().Decode(&k.Example); err != nil { + return false, err + } + } + + // SchemaProps + if err := k.Ref.FromGnostic(g.XRef); err != nil { + return false, err + } + k.Type = g.Type.GetValue() + k.Format = g.GetFormat() + k.Title = g.GetTitle() + + // These below fields are not available in gnostic types, so will never + // be populated. This means roundtrips which make use of these + // (non-official, kube-only) fields will lose information. + // + // Schema.ID is not available in official spec + // Schema.$schema + // Schema.Nullable - in openapiv3, not v2 + // Schema.AnyOf - in openapiv3, not v2 + // Schema.OneOf - in openapiv3, not v2 + // Schema.Not - in openapiv3, not v2 + // Schema.PatternProperties - in openapiv3, not v2 + // Schema.Dependencies - in openapiv3, not v2 + // Schema.AdditionalItems + // Schema.Definitions - not part of spec + // Schema.ExtraProps - gnostic parser rejects any keys it does not recognize + + if g.GetDefault() != nil { + if err := g.GetDefault().ToRawInfo().Decode(&k.Default); err != nil { + return false, err + } + } + + // These conditionals (!= 0) follow gnostic's logic for ToRawInfo + // The keys in gnostic source are only included if nonzero. 
+ + if g.Maximum != 0.0 { + k.Maximum = &g.Maximum + } + + if g.Minimum != 0.0 { + k.Minimum = &g.Minimum + } + + k.ExclusiveMaximum = g.ExclusiveMaximum + k.ExclusiveMinimum = g.ExclusiveMinimum + + if g.MaxLength != 0 { + k.MaxLength = &g.MaxLength + } + + if g.MinLength != 0 { + k.MinLength = &g.MinLength + } + + k.Pattern = g.GetPattern() + + if g.MaxItems != 0 { + k.MaxItems = &g.MaxItems + } + + if g.MinItems != 0 { + k.MinItems = &g.MinItems + } + k.UniqueItems = g.UniqueItems + + if g.MultipleOf != 0 { + k.MultipleOf = &g.MultipleOf + } + + for _, v := range g.GetEnum() { + if v == nil { + continue + } + + var convert interface{} + if err := v.ToRawInfo().Decode(&convert); err != nil { + return false, err + } + k.Enum = append(k.Enum, convert) + } + + if g.MaxProperties != 0 { + k.MaxProperties = &g.MaxProperties + } + + if g.MinProperties != 0 { + k.MinProperties = &g.MinProperties + } + + k.Required = g.Required + + if g.GetItems() != nil { + k.Items = &SchemaOrArray{} + for _, v := range g.Items.GetSchema() { + if v == nil { + continue + } + + schema := Schema{} + if nok, err := schema.FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + k.Items.Schemas = append(k.Items.Schemas, schema) + } + + if len(k.Items.Schemas) == 1 { + k.Items.Schema = &k.Items.Schemas[0] + k.Items.Schemas = nil + } + } + + for i, v := range g.GetAllOf() { + if v == nil { + continue + } + + k.AllOf = append(k.AllOf, Schema{}) + if nok, err := k.AllOf[i].FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Properties != nil { + k.Properties = make(map[string]Schema) + for _, namedSchema := range g.Properties.AdditionalProperties { + if namedSchema == nil { + continue + } + val := &Schema{} + if nok, err := val.FromGnostic(namedSchema.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Properties[namedSchema.Name] = *val + } + } + + if g.AdditionalProperties != nil { + k.AdditionalProperties = &SchemaOrBool{} + if g.AdditionalProperties.GetSchema() == nil { + k.AdditionalProperties.Allows = g.AdditionalProperties.GetBoolean() + } else { + k.AdditionalProperties.Schema = &Schema{} + k.AdditionalProperties.Allows = true + + if nok, err := k.AdditionalProperties.Schema.FromGnostic(g.AdditionalProperties.GetSchema()); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +func (k *Schema) FromGnosticSchemaItem(g *openapi_v2.SchemaItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + switch p := g.Oneof.(type) { + case *openapi_v2.SchemaItem_FileSchema: + fileSchema := p.FileSchema + + if err := k.VendorExtensible.FromGnostic(fileSchema.VendorExtension); err != nil { + return false, err + } + + k.Format = fileSchema.Format + k.Title = fileSchema.Title + k.Description = fileSchema.Description + k.Required = fileSchema.Required + k.Type = []string{fileSchema.Type} + k.ReadOnly = fileSchema.ReadOnly + + if fileSchema.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(fileSchema.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if fileSchema.Example != nil { + if err := fileSchema.Example.ToRawInfo().Decode(&k.Example); err != nil { + return false, err + } + } + + if fileSchema.Default != nil { + if err := fileSchema.Default.ToRawInfo().Decode(&k.Default); err != nil { + return false, err + } + } + + case 
*openapi_v2.SchemaItem_Schema: + schema := p.Schema + + if nok, err := k.FromGnostic(schema); err != nil { + return false, err + } else if !nok { + ok = false + } + default: + return false, errors.New("unrecognized type for SchemaItem") + } + + return ok, nil +} + +// SecurityDefinitions + +func (k SecurityDefinitions) FromGnostic(g *openapi_v2.SecurityDefinitions) error { + for _, v := range g.GetAdditionalProperties() { + if v == nil { + continue + } + secScheme := &SecurityScheme{} + if err := secScheme.FromGnostic(v.Value); err != nil { + return err + } + k[v.Name] = secScheme + } + + return nil +} + +type GnosticCommonSecurityDefinition interface { + GetType() string + GetDescription() string +} + +func (k *SecuritySchemeProps) FromGnostic(g GnosticCommonSecurityDefinition) error { + k.Type = g.GetType() + k.Description = g.GetDescription() + + if hasName, success := g.(interface{ GetName() string }); success { + k.Name = hasName.GetName() + } + + if hasIn, success := g.(interface{ GetIn() string }); success { + k.In = hasIn.GetIn() + } + + if hasFlow, success := g.(interface{ GetFlow() string }); success { + k.Flow = hasFlow.GetFlow() + } + + if hasAuthURL, success := g.(interface{ GetAuthorizationUrl() string }); success { + k.AuthorizationURL = hasAuthURL.GetAuthorizationUrl() + } + + if hasTokenURL, success := g.(interface{ GetTokenUrl() string }); success { + k.TokenURL = hasTokenURL.GetTokenUrl() + } + + if hasScopes, success := g.(interface { + GetScopes() *openapi_v2.Oauth2Scopes + }); success { + scopes := hasScopes.GetScopes() + if scopes != nil { + k.Scopes = make(map[string]string, len(scopes.AdditionalProperties)) + for _, v := range scopes.AdditionalProperties { + if v == nil { + continue + } + + k.Scopes[v.Name] = v.Value + } + } + } + + return nil +} + +func (k *SecurityScheme) FromGnostic(g *openapi_v2.SecurityDefinitionsItem) error { + if g == nil { + return nil + } + + switch s := g.Oneof.(type) { + case *openapi_v2.SecurityDefinitionsItem_ApiKeySecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.ApiKeySecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.ApiKeySecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_BasicAuthenticationSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.BasicAuthenticationSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.BasicAuthenticationSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2AccessCodeSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2AccessCodeSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2ApplicationSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ApplicationSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2ApplicationSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2ImplicitSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ImplicitSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2ImplicitSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2PasswordSecurity: + if err := 
k.SecuritySchemeProps.FromGnostic(s.Oauth2PasswordSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2PasswordSecurity.VendorExtension); err != nil { + return err + } + return nil + default: + return errors.New("unrecognized SecurityDefinitionsItem") + } +} + +// Tag + +func (k *Tag) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if nok, err := k.TagProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *TagProps) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + k.Name = g.Name + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// ExternalDocumentation + +func (k *ExternalDocumentation) FromGnostic(g *openapi_v2.ExternalDocs) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + k.URL = g.Url + + // data loss! g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go new file mode 100644 index 000000000..05310c46b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go @@ -0,0 +1,118 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +const ( + jsonArray = "array" +) + +// HeaderProps describes a response header +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + VendorExtensible + HeaderProps +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(h) + } + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + b4, err := json.Marshal(h.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3, b4), nil +} + +func (h Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Extensions + HeaderProps + } + x.CommonValidations = commonValidationsOmitZero(h.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(h.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = h.HeaderProps + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, h) + } + + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + HeaderProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + h.CommonValidations = x.CommonValidations + h.SimpleSchema = x.SimpleSchema + h.Extensions = internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go new file mode 100644 index 000000000..d667b705b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go @@ -0,0 +1,219 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// Extensions vendor specific extensions +type Extensions map[string]interface{} + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value interface{}) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetBool gets a string value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string value from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, isSlice := v.([]interface{}) + if !isSlice { + return nil, false + } + var strs []string + for _, iface := range arr { + str, isString := iface.(string) + if !isString { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// GetObject gets the object value from the extensions. +// out must be a json serializable type; the json go struct +// tags of out are used to populate it. +func (e Extensions) GetObject(key string, out interface{}) error { + // This json serialization/deserialization could be replaced with + // an approach using reflection if the optimization becomes justified. + if v, ok := e[strings.ToLower(key)]; ok { + b, err := json.Marshal(v) + if err != nil { + return err + } + err = json.Unmarshal(b, out) + if err != nil { + return err + } + } + return nil +} + +func (e Extensions) sanitizeWithExtra() (extra map[string]any) { + for k, v := range e { + if !internal.IsExtensionKey(k) { + if extra == nil { + extra = make(map[string]any) + } + extra[k] = v + delete(e, k) + } + } + return extra +} + +// VendorExtensible composition block. 
+type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value interface{}) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]interface{}) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]interface{}) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]interface{}{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. +// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (i Info) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + InfoProps + } + x.Extensions = i.Extensions + x.InfoProps = i.InfoProps + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, i) + } + + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} + +func (i *Info) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + InfoProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + i.Extensions = internal.SanitizeExtensions(x.Extensions) + i.InfoProps = x.InfoProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go new file mode 100644 index 000000000..4132467d2 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go @@ -0,0 +1,180 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type simpleSchemaOmitZero struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitzero"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type commonValidationsOmitZero struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". 
+// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, i) + } + + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +func (i *Items) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := i.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + + i.CommonValidations = x.CommonValidations + i.SimpleSchema = x.SimpleSchema + i.Extensions = internal.SanitizeExtensions(x.Extensions) + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +func (i Items) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(i.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(i.SimpleSchema) + x.Ref = i.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(i.Extensions) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go new file mode 100644 index 000000000..f20961b4f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// License information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go new file mode 100644 index 000000000..63eed3460 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go @@ -0,0 +1,146 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type operationPropsOmitZero struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty,omitzero"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitzero"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marhaller here to handle a special cases related to +// the Security field. We need to preserve zero length slice +// while omitting the field when the value is nil/unset. 
+func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, o) + } + + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + type OperationPropsNoMethods OperationProps // strip MarshalJSON method + var x struct { + Extensions + OperationPropsNoMethods + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = OperationProps(x.OperationPropsNoMethods) + return nil +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(o) + } + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (o Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go new file mode 100644 index 000000000..53d1e0aa9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go @@ -0,0 +1,172 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type paramPropsOmitZero struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitzero"` + Schema *Schema `json:"schema,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + ParamProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + p.CommonValidations = x.CommonValidations + p.SimpleSchema = x.SimpleSchema + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParamProps = x.ParamProps + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} + +func (p Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + ParamProps paramPropsOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(p.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(p.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParamProps = paramPropsOmitZero(p.ParamProps) + x.Ref = p.Refable.Ref.String() + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go new file mode 100644 index 000000000..1d1588cb9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go @@ -0,0 +1,113 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +func (p *PathItem) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + PathItemProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathItemProps = x.PathItemProps + + return nil +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} + +func (p PathItem) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + PathItemProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathItemProps = p.PathItemProps + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go new file mode 100644 index 000000000..18f6a9f42 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go @@ -0,0 +1,164 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + var ext any + var pi PathItem + switch k := tok.Kind(); k { + case 'n': + return nil // noop + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k := tok.String(); { + case internal.IsExtensionKey(k): + ext = nil + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi = PathItem{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + p.Paths[k] = pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (p Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) +} diff --git 
a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go new file mode 100644 index 000000000..775b3b0c3 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go @@ -0,0 +1,155 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" + + "k8s.io/kube-openapi/pkg/internal" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshalss the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return r.String() + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI(basepaths ...string) bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + rr, err := http.Get(v) + if err != nil { + return false + } + + return rr.StatusCode/100 == 2 + } + + if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(filepath.ToSlash(pth)) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + return internal.JSONRefFromMap(&r.Ref, v) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go new file mode 100644 index 000000000..3ff1fe132 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go @@ -0,0 +1,131 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type responsePropsOmitZero struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitzero"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation. 
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + + return nil +} + +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + ResponseProps + Extensions + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + if err := r.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps + + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } + b1, err := json.Marshal(r.ResponseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + ResponseProps responsePropsOmitZero `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = responsePropsOmitZero(r.ResponseProps) + return opts.MarshalNext(enc, x) +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go new file mode 100644 index 000000000..d9ad760a4 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go @@ -0,0 +1,208 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// Responses is a container for the expected responses of an operation. +// The container maps a HTTP response code to the expected response. 
+// It is not expected from the documentation to necessarily cover all possible HTTP response codes, +// since they may not be known in advance. However, it is expected from the documentation to cover +// a successful operation response and any known errors. +// +// The `default` can be used a default response object for all HTTP codes that are not covered +// individually by the specification. +// +// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response +// for a successful operation call. +// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + +// ResponsesProps describes all responses for an operation. +// It tells what is the default response and maps all responses with a +// HTTP status code. 
+type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + if v, ok := res["default"]; ok { + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value + delete(res, "default") + } + for k, v := range res { + // Take all integral keys + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = value + } + } + return nil +} + +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + var ext any + var resp Response + switch k := tok.Kind(); k { + case 'n': + return nil // noop + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case internal.IsExtensionKey(k): + ext = nil + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp = Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + respCopy := resp + r.ResponsesProps.Default = &respCopy + default: + if nk, err := strconv.Atoi(k); err == nil { + resp = Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = resp + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go new file mode 100644 index 000000000..dfbb2e05c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go @@ -0,0 +1,631 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := 
map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := url.Parse(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
+type schemaPropsOmitZero struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitzero"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitzero"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitzero"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitzero"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitzero"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitzero"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type swaggerSchemaPropsOmitZero struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitzero"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
+// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = format + } + return s +} + +// AsNullable flags this schema as nullable. +func (s *Schema) AsNullable() *Schema { + s.Nullable = true + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{jsonArray} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) 
+ return s +} + +// WithMaxLength sets a max length value +func (s *Schema) WithMaxLength(max int64) *Schema { + s.MaxLength = &max + return s +} + +// WithMinLength sets a min length value +func (s *Schema) WithMinLength(min int64) *Schema { + s.MinLength = &min + return s +} + +// WithPattern sets a pattern value +func (s *Schema) WithPattern(pattern string) *Schema { + s.Pattern = pattern + return s +} + +// WithMultipleOf sets a multiple of value +func (s *Schema) WithMultipleOf(number float64) *Schema { + s.MultipleOf = &number + return s +} + +// WithMaximum sets a maximum number value +func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { + s.Maximum = &max + s.ExclusiveMaximum = exclusive + return s +} + +// WithMinimum sets a minimum number value +func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { + s.Minimum = &min + s.ExclusiveMinimum = exclusive + return s +} + +// WithEnum sets a the enum values (replace) +func (s *Schema) WithEnum(values ...interface{}) *Schema { + s.Enum = append([]interface{}{}, values...) + return s +} + +// WithMaxItems sets the max items +func (s *Schema) WithMaxItems(size int64) *Schema { + s.MaxItems = &size + return s +} + +// WithMinItems sets the min items +func (s *Schema) WithMinItems(size int64) *Schema { + s.MinItems = &size + return s +} + +// UniqueValues dictates that this array can only have unique items +func (s *Schema) UniqueValues() *Schema { + s.UniqueItems = true + return s +} + +// AllowDuplicates this array can have duplicates +func (s *Schema) AllowDuplicates() *Schema { + s.UniqueItems = false + return s +} + +// AddToAllOf adds a schema to the allOf property +func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { + s.AllOf = append(s.AllOf, schemas...) + return s +} + +// WithDiscriminator sets the name of the discriminator field +func (s *Schema) WithDiscriminator(discriminator string) *Schema { + s.Discriminator = discriminator + return s +} + +// AsReadOnly flags this schema as readonly +func (s *Schema) AsReadOnly() *Schema { + s.ReadOnly = true + return s +} + +// AsWritable flags this schema as writeable (not read-only) +func (s *Schema) AsWritable() *Schema { + s.ReadOnly = false + return s +} + +// WithExample sets the example for this schema +func (s *Schema) WithExample(example interface{}) *Schema { + s.Example = example + return s +} + +// WithExternalDocs sets/removes the external docs for/from this schema. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. 
+func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +func (s Schema) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + SchemaProps schemaPropsOmitZero `json:",inline"` + SwaggerSchemaProps swaggerSchemaPropsOmitZero `json:",inline"` + Schema string `json:"$schema,omitempty"` + Ref string `json:"$ref,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(s.Extensions)+len(s.ExtraProps)) + for k, v := range s.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range s.ExtraProps { + x.ArbitraryKeys[k] = v + } + x.SchemaProps = schemaPropsOmitZero(s.SchemaProps) + x.SwaggerSchemaProps = swaggerSchemaPropsOmitZero(s.SwaggerSchemaProps) + x.Ref = s.Ref.String() + x.Schema = string(s.Schema) + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { + return err + } + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + + return nil +} + +func (s *Schema) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + SchemaProps + SwaggerSchemaProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + if err := x.Ref.fromMap(x.Extensions); err != nil { + return err + } + + if err := x.Schema.fromMap(x.Extensions); err != nil { + return err + } + + 
delete(x.Extensions, "$ref") + delete(x.Extensions, "$schema") + + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(x.Extensions, pn) + } + if len(x.Extensions) == 0 { + x.Extensions = nil + } + + s.ExtraProps = x.Extensions.sanitizeWithExtra() + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SchemaProps = x.SchemaProps + s.SwaggerSchemaProps = x.SwaggerSchemaProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go new file mode 100644 index 000000000..e2b7da14c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go @@ -0,0 +1,92 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + b1, err := json.Marshal(s.SecuritySchemeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (s SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} + +func (s *SecurityScheme) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SecuritySchemeProps = x.SecuritySchemeProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go new file mode 100644 index 000000000..c8f3beaa3 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go @@ -0,0 +1,439 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// Swagger this is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) +// together into one document. 
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SwaggerProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SwaggerProps = s.SwaggerProps + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + // Note: If you're willing to make breaking changes, it is possible to + // optimize this and other usages of this pattern: + // https://github.com/kubernetes/kube-openapi/pull/319#discussion_r983165948 + var x struct { + Extensions + SwaggerProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SwaggerProps = x.SwaggerProps + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// 
MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + + if s.Schema == nil && !s.Allows { + return enc.WriteToken(jsonv2.False) + } + return enc.WriteToken(jsonv2.True) +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + + var nw SchemaOrBool + if len(data) > 0 && data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + nw.Allows = true + } else { + json.Unmarshal(data, &nw.Allows) + } + *s = nw + return nil +} + +func (s *SchemaOrBool) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch k := dec.PeekKind(); k { + case '{': + err := opts.UnmarshalNext(dec, &s.Schema) + if err != nil { + return err + } + s.Allows = true + return nil + case 't', 'f': + err := opts.UnmarshalNext(dec, &s.Allows) + if err != nil { + return err + } + return nil + default: + return fmt.Errorf("expected object or bool, not '%v'", k.String()) + } +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if len(s.Property) > 0 { + return opts.MarshalNext(enc, s.Property) + } + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + return enc.WriteToken(jsonv2.Null) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s *SchemaOrStringArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch dec.PeekKind() { + case '{': + return opts.UnmarshalNext(dec, &s.Schema) + case '[': + return opts.UnmarshalNext(dec, &s.Property) + default: + _, err := dec.ReadValue() + return err + } +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. 
+// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +func (s *StringOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch k := dec.PeekKind(); k { + case '[': + *s = StringOrArray{} + return opts.UnmarshalNext(dec, (*[]string)(s)) + case '"': + *s = StringOrArray{""} + return opts.UnmarshalNext(dec, &(*s)[0]) + case 'n': + // Throw out null token + _, _ = dec.ReadToken() + return nil + default: + return fmt.Errorf("expected string or array, not '%v'", k.String()) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + if s.Schemas != nil { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schemas != nil { + return opts.MarshalNext(enc, s.Schemas) + } + return opts.MarshalNext(enc, s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +func (s *SchemaOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch dec.PeekKind() { + case '{': + return opts.UnmarshalNext(dec, &s.Schema) + case '[': + return opts.UnmarshalNext(dec, &s.Schemas) + default: + _, err := dec.ReadValue() + return err + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go new file mode 100644 index 000000000..d105d52ca --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go @@ -0,0 +1,91 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(t) + } + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +func (t Tag) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + TagProps + } + x.Extensions = internal.SanitizeExtensions(t.Extensions) + x.TagProps = t.TagProps + return opts.MarshalNext(enc, x) +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, t) + } + + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} + +func (t *Tag) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + TagProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + t.Extensions = internal.SanitizeExtensions(x.Extensions) + t.TagProps = x.TagProps + return nil +} diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go index dd181ce8d..b8b6af5c8 100644 --- a/vendor/k8s.io/utils/clock/clock.go +++ b/vendor/k8s.io/utils/clock/clock.go @@ -63,6 +63,16 @@ type WithDelayedExecution interface { AfterFunc(d time.Duration, f func()) Timer } +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + // Ticker defines the Ticker interface. type Ticker interface { C() <-chan time.Time diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go index fb493c4ba..79e11deb6 100644 --- a/vendor/k8s.io/utils/clock/testing/fake_clock.go +++ b/vendor/k8s.io/utils/clock/testing/fake_clock.go @@ -239,7 +239,8 @@ func (f *FakeClock) Sleep(d time.Duration) { // IntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration. // IntervalClock technically implements the other methods of clock.Clock, but each implementation is just a panic. -// See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. +// +// Deprecated: See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. type IntervalClock struct { Time time.Time Duration time.Duration @@ -282,9 +283,9 @@ func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { // NewTicker has no implementation yet and is omitted. // TODO: make interval clock use FakeClock so this can be implemented. -//func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { -// panic("IntervalClock doesn't implement NewTicker") -//} +func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { + panic("IntervalClock doesn't implement NewTicker") +} // Sleep is unimplemented, will panic. 
func (*IntervalClock) Sleep(d time.Duration) { diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go deleted file mode 100644 index e4e740cad..000000000 --- a/vendor/k8s.io/utils/integer/integer.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integer - -// IntMax returns the maximum of the params -func IntMax(a, b int) int { - if b > a { - return b - } - return a -} - -// IntMin returns the minimum of the params -func IntMin(a, b int) int { - if b < a { - return b - } - return a -} - -// Int32Max returns the maximum of the params -func Int32Max(a, b int32) int32 { - if b > a { - return b - } - return a -} - -// Int32Min returns the minimum of the params -func Int32Min(a, b int32) int32 { - if b < a { - return b - } - return a -} - -// Int64Max returns the maximum of the params -func Int64Max(a, b int64) int64 { - if b > a { - return b - } - return a -} - -// Int64Min returns the minimum of the params -func Int64Min(a, b int64) int64 { - if b < a { - return b - } - return a -} - -// RoundToInt32 rounds floats into integer numbers. -func RoundToInt32(a float64) int32 { - if a < 0 { - return int32(a - 0.5) - } - return int32(a + 0.5) -} diff --git a/vendor/k8s.io/utils/net/ipfamily.go b/vendor/k8s.io/utils/net/ipfamily.go new file mode 100644 index 000000000..1a51fa391 --- /dev/null +++ b/vendor/k8s.io/utils/net/ipfamily.go @@ -0,0 +1,181 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "net" +) + +// IPFamily refers to a specific family if not empty, i.e. "4" or "6". 
+type IPFamily string + +// Constants for valid IPFamilys: +const ( + IPFamilyUnknown IPFamily = "" + + IPv4 IPFamily = "4" + IPv6 IPFamily = "6" +) + +// IsDualStackIPs returns true if: +// - all elements of ips are valid +// - at least one IP from each family (v4 and v6) is present +func IsDualStackIPs(ips []net.IP) (bool, error) { + v4Found := false + v6Found := false + for i, ip := range ips { + switch IPFamilyOf(ip) { + case IPv4: + v4Found = true + case IPv6: + v6Found = true + default: + return false, fmt.Errorf("invalid IP[%d]: %v", i, ip) + } + } + + return (v4Found && v6Found), nil +} + +// IsDualStackIPStrings returns true if: +// - all elements of ips can be parsed as IPs +// - at least one IP from each family (v4 and v6) is present +func IsDualStackIPStrings(ips []string) (bool, error) { + parsedIPs := make([]net.IP, 0, len(ips)) + for i, ip := range ips { + parsedIP := ParseIPSloppy(ip) + if parsedIP == nil { + return false, fmt.Errorf("invalid IP[%d]: %v", i, ip) + } + parsedIPs = append(parsedIPs, parsedIP) + } + return IsDualStackIPs(parsedIPs) +} + +// IsDualStackCIDRs returns true if: +// - all elements of cidrs are non-nil +// - at least one CIDR from each family (v4 and v6) is present +func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { + v4Found := false + v6Found := false + for i, cidr := range cidrs { + switch IPFamilyOfCIDR(cidr) { + case IPv4: + v4Found = true + case IPv6: + v6Found = true + default: + return false, fmt.Errorf("invalid CIDR[%d]: %v", i, cidr) + } + } + + return (v4Found && v6Found), nil +} + +// IsDualStackCIDRStrings returns if +// - all elements of cidrs can be parsed as CIDRs +// - at least one CIDR from each family (v4 and v6) is present +func IsDualStackCIDRStrings(cidrs []string) (bool, error) { + parsedCIDRs, err := ParseCIDRs(cidrs) + if err != nil { + return false, err + } + return IsDualStackCIDRs(parsedCIDRs) +} + +// IPFamilyOf returns the IP family of ip, or IPFamilyUnknown if it is invalid. +func IPFamilyOf(ip net.IP) IPFamily { + switch { + case ip.To4() != nil: + return IPv4 + case ip.To16() != nil: + return IPv6 + default: + return IPFamilyUnknown + } +} + +// IPFamilyOfString returns the IP family of ip, or IPFamilyUnknown if ip cannot +// be parsed as an IP. +func IPFamilyOfString(ip string) IPFamily { + return IPFamilyOf(ParseIPSloppy(ip)) +} + +// IPFamilyOfCIDR returns the IP family of cidr. +func IPFamilyOfCIDR(cidr *net.IPNet) IPFamily { + if cidr == nil { + return IPFamilyUnknown + } + return IPFamilyOf(cidr.IP) +} + +// IPFamilyOfCIDRString returns the IP family of cidr. +func IPFamilyOfCIDRString(cidr string) IPFamily { + ip, _, _ := ParseCIDRSloppy(cidr) + return IPFamilyOf(ip) +} + +// IsIPv6 returns true if netIP is IPv6 (and false if it is IPv4, nil, or invalid). +func IsIPv6(netIP net.IP) bool { + return IPFamilyOf(netIP) == IPv6 +} + +// IsIPv6String returns true if ip contains a single IPv6 address and nothing else. It +// returns false if ip is an empty string, an IPv4 address, or anything else that is not a +// single IPv6 address. +func IsIPv6String(ip string) bool { + return IPFamilyOfString(ip) == IPv6 +} + +// IsIPv6CIDR returns true if a cidr is a valid IPv6 CIDR. It returns false if cidr is +// nil or an IPv4 CIDR. Its behavior is not defined if cidr is invalid. +func IsIPv6CIDR(cidr *net.IPNet) bool { + return IPFamilyOfCIDR(cidr) == IPv6 +} + +// IsIPv6CIDRString returns true if cidr contains a single IPv6 CIDR and nothing else. 
It +// returns false if cidr is an empty string, an IPv4 CIDR, or anything else that is not a +// single valid IPv6 CIDR. +func IsIPv6CIDRString(cidr string) bool { + return IPFamilyOfCIDRString(cidr) == IPv6 +} + +// IsIPv4 returns true if netIP is IPv4 (and false if it is IPv6, nil, or invalid). +func IsIPv4(netIP net.IP) bool { + return IPFamilyOf(netIP) == IPv4 +} + +// IsIPv4String returns true if ip contains a single IPv4 address and nothing else. It +// returns false if ip is an empty string, an IPv6 address, or anything else that is not a +// single IPv4 address. +func IsIPv4String(ip string) bool { + return IPFamilyOfString(ip) == IPv4 +} + +// IsIPv4CIDR returns true if cidr is a valid IPv4 CIDR. It returns false if cidr is nil +// or an IPv6 CIDR. Its behavior is not defined if cidr is invalid. +func IsIPv4CIDR(cidr *net.IPNet) bool { + return IPFamilyOfCIDR(cidr) == IPv4 +} + +// IsIPv4CIDRString returns true if cidr contains a single IPv4 CIDR and nothing else. It +// returns false if cidr is an empty string, an IPv6 CIDR, or anything else that is not a +// single valid IPv4 CIDR. +func IsIPv4CIDRString(cidr string) bool { + return IPFamilyOfCIDRString(cidr) == IPv4 +} diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 000000000..7cb7795be --- /dev/null +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections, from child listeners to parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener +var _ net.Listener = &multiListener{} + +// MultiListen returns net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listener and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g, localhost) and it will create a listener for at +// most one of the host's IP. 
+func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) +} + +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing +// mocking for unit-testing. +func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. + conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. + terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. + if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listener. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections. 
+ for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener. +func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. +func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go index b7c08e2e0..704c1f232 100644 --- a/vendor/k8s.io/utils/net/net.go +++ b/vendor/k8s.io/utils/net/net.go @@ -29,138 +29,16 @@ import ( // order is maintained func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) { cidrs := make([]*net.IPNet, 0, len(cidrsString)) - for _, cidrString := range cidrsString { + for i, cidrString := range cidrsString { _, cidr, err := ParseCIDRSloppy(cidrString) if err != nil { - return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err) + return nil, fmt.Errorf("invalid CIDR[%d]: %v (%v)", i, cidr, err) } cidrs = append(cidrs, cidr) } return cidrs, nil } -// IsDualStackIPs returns if a slice of ips is: -// - all are valid ips -// - at least one ip from each family (v4 or v6) -func IsDualStackIPs(ips []net.IP) (bool, error) { - v4Found := false - v6Found := false - for _, ip := range ips { - if ip == nil { - return false, fmt.Errorf("ip %v is invalid", ip) - } - - if v4Found && v6Found { - continue - } - - if IsIPv6(ip) { - v6Found = true - continue - } - - v4Found = true - } - - return (v4Found && v6Found), nil -} - -// IsDualStackIPStrings returns if -// - all are valid ips -// - at least one ip from each family (v4 or v6) -func IsDualStackIPStrings(ips []string) (bool, error) { - parsedIPs := make([]net.IP, 0, len(ips)) - for _, ip := range ips { - parsedIP := ParseIPSloppy(ip) - parsedIPs = append(parsedIPs, parsedIP) - } - return IsDualStackIPs(parsedIPs) -} - -// IsDualStackCIDRs returns if -// - all are valid cidrs -// - at least one cidr from each family (v4 or v6) -func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) { - v4Found := false - v6Found := false - for _, cidr := range cidrs { - if cidr == nil { - return false, fmt.Errorf("cidr %v is invalid", cidr) - } - - if v4Found && v6Found { - continue - } - - if IsIPv6(cidr.IP) { - v6Found = true - continue - } - v4Found = true - } - - return v4Found && v6Found, nil -} - -// IsDualStackCIDRStrings returns if -// - all are valid cidrs -// - at least one cidr from each family (v4 or v6) -func IsDualStackCIDRStrings(cidrs []string) (bool, error) { - parsedCIDRs, err := ParseCIDRs(cidrs) - if err != nil { - return false, err - } - return IsDualStackCIDRs(parsedCIDRs) -} - -// IsIPv6 returns if netIP is IPv6. -func IsIPv6(netIP net.IP) bool { - return netIP != nil && netIP.To4() == nil -} - -// IsIPv6String returns if ip is IPv6. -func IsIPv6String(ip string) bool { - netIP := ParseIPSloppy(ip) - return IsIPv6(netIP) -} - -// IsIPv6CIDRString returns if cidr is IPv6. -// This assumes cidr is a valid CIDR. -func IsIPv6CIDRString(cidr string) bool { - ip, _, _ := ParseCIDRSloppy(cidr) - return IsIPv6(ip) -} - -// IsIPv6CIDR returns if a cidr is ipv6 -func IsIPv6CIDR(cidr *net.IPNet) bool { - ip := cidr.IP - return IsIPv6(ip) -} - -// IsIPv4 returns if netIP is IPv4. 
-func IsIPv4(netIP net.IP) bool { - return netIP != nil && netIP.To4() != nil -} - -// IsIPv4String returns if ip is IPv4. -func IsIPv4String(ip string) bool { - netIP := ParseIPSloppy(ip) - return IsIPv4(netIP) -} - -// IsIPv4CIDR returns if a cidr is ipv4 -func IsIPv4CIDR(cidr *net.IPNet) bool { - ip := cidr.IP - return IsIPv4(ip) -} - -// IsIPv4CIDRString returns if cidr is IPv4. -// This assumes cidr is a valid CIDR. -func IsIPv4CIDRString(cidr string) bool { - ip, _, _ := ParseCIDRSloppy(cidr) - return IsIPv4(ip) -} - // ParsePort parses a string representing an IP port. If the string is not a // valid port number, this returns an error. func ParsePort(port string, allowZero bool) (int, error) { diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go index 7ac04f0dc..c6a53fa02 100644 --- a/vendor/k8s.io/utils/net/port.go +++ b/vendor/k8s.io/utils/net/port.go @@ -23,15 +23,6 @@ import ( "strings" ) -// IPFamily refers to a specific family if not empty, i.e. "4" or "6". -type IPFamily string - -// Constants for valid IPFamilys: -const ( - IPv4 IPFamily = "4" - IPv6 = "6" -) - // Protocol is a network protocol support by LocalPort. type Protocol string @@ -67,7 +58,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco if protocol != TCP && protocol != UDP { return nil, fmt.Errorf("Unsupported protocol %s", protocol) } - if ipFamily != "" && ipFamily != "4" && ipFamily != "6" { + if ipFamily != IPFamilyUnknown && ipFamily != IPv4 && ipFamily != IPv6 { return nil, fmt.Errorf("Invalid IP family %s", ipFamily) } if ip != "" { @@ -75,9 +66,10 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco if parsedIP == nil { return nil, fmt.Errorf("invalid ip address %s", ip) } - asIPv4 := parsedIP.To4() - if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 { - return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily) + if ipFamily != IPFamilyUnknown { + if IPFamily(parsedIP) != ipFamily { + return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily) + } } } return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 000000000..0d6392752 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/pointer/README.md b/vendor/k8s.io/utils/pointer/README.md new file mode 100644 index 000000000..2ca8073dc --- /dev/null +++ b/vendor/k8s.io/utils/pointer/README.md @@ -0,0 +1,3 @@ +# Pointer + +This package provides some functions for pointer-based operations. diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go new file mode 100644 index 000000000..b673a6425 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -0,0 +1,249 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Deprecated: Use functions in k8s.io/utils/ptr instead: ptr.To to obtain +// a pointer, ptr.Deref to dereference a pointer, ptr.Equal to compare +// dereferenced pointers. +package pointer + +import ( + "time" + + "k8s.io/utils/ptr" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +// +// Deprecated: Use ptr.AllPtrFieldsNil instead. +var AllPtrFieldsNil = ptr.AllPtrFieldsNil + +// Int returns a pointer to an int. +var Int = ptr.To[int] + +// IntPtr is a function variable referring to Int. +// +// Deprecated: Use ptr.To instead. +var IntPtr = Int // for back-compat + +// IntDeref dereferences the int ptr and returns it if not nil, or else +// returns def. +var IntDeref = ptr.Deref[int] + +// IntPtrDerefOr is a function variable referring to IntDeref. +// +// Deprecated: Use ptr.Deref instead. +var IntPtrDerefOr = IntDeref // for back-compat + +// Int32 returns a pointer to an int32. +var Int32 = ptr.To[int32] + +// Int32Ptr is a function variable referring to Int32. +// +// Deprecated: Use ptr.To instead. +var Int32Ptr = Int32 // for back-compat + +// Int32Deref dereferences the int32 ptr and returns it if not nil, or else +// returns def. +var Int32Deref = ptr.Deref[int32] + +// Int32PtrDerefOr is a function variable referring to Int32Deref. +// +// Deprecated: Use ptr.Deref instead. +var Int32PtrDerefOr = Int32Deref // for back-compat + +// Int32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Int32Equal = ptr.Equal[int32] + +// Uint returns a pointer to an uint +var Uint = ptr.To[uint] + +// UintPtr is a function variable referring to Uint. +// +// Deprecated: Use ptr.To instead. +var UintPtr = Uint // for back-compat + +// UintDeref dereferences the uint ptr and returns it if not nil, or else +// returns def. +var UintDeref = ptr.Deref[uint] + +// UintPtrDerefOr is a function variable referring to UintDeref. +// +// Deprecated: Use ptr.Deref instead. +var UintPtrDerefOr = UintDeref // for back-compat + +// Uint32 returns a pointer to an uint32. +var Uint32 = ptr.To[uint32] + +// Uint32Ptr is a function variable referring to Uint32. +// +// Deprecated: Use ptr.To instead. +var Uint32Ptr = Uint32 // for back-compat + +// Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else +// returns def. +var Uint32Deref = ptr.Deref[uint32] + +// Uint32PtrDerefOr is a function variable referring to Uint32Deref. +// +// Deprecated: Use ptr.Deref instead. +var Uint32PtrDerefOr = Uint32Deref // for back-compat + +// Uint32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Uint32Equal = ptr.Equal[uint32] + +// Int64 returns a pointer to an int64. +var Int64 = ptr.To[int64] + +// Int64Ptr is a function variable referring to Int64. 
+// +// Deprecated: Use ptr.To instead. +var Int64Ptr = Int64 // for back-compat + +// Int64Deref dereferences the int64 ptr and returns it if not nil, or else +// returns def. +var Int64Deref = ptr.Deref[int64] + +// Int64PtrDerefOr is a function variable referring to Int64Deref. +// +// Deprecated: Use ptr.Deref instead. +var Int64PtrDerefOr = Int64Deref // for back-compat + +// Int64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Int64Equal = ptr.Equal[int64] + +// Uint64 returns a pointer to an uint64. +var Uint64 = ptr.To[uint64] + +// Uint64Ptr is a function variable referring to Uint64. +// +// Deprecated: Use ptr.To instead. +var Uint64Ptr = Uint64 // for back-compat + +// Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else +// returns def. +var Uint64Deref = ptr.Deref[uint64] + +// Uint64PtrDerefOr is a function variable referring to Uint64Deref. +// +// Deprecated: Use ptr.Deref instead. +var Uint64PtrDerefOr = Uint64Deref // for back-compat + +// Uint64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Uint64Equal = ptr.Equal[uint64] + +// Bool returns a pointer to a bool. +var Bool = ptr.To[bool] + +// BoolPtr is a function variable referring to Bool. +// +// Deprecated: Use ptr.To instead. +var BoolPtr = Bool // for back-compat + +// BoolDeref dereferences the bool ptr and returns it if not nil, or else +// returns def. +var BoolDeref = ptr.Deref[bool] + +// BoolPtrDerefOr is a function variable referring to BoolDeref. +// +// Deprecated: Use ptr.Deref instead. +var BoolPtrDerefOr = BoolDeref // for back-compat + +// BoolEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +var BoolEqual = ptr.Equal[bool] + +// String returns a pointer to a string. +var String = ptr.To[string] + +// StringPtr is a function variable referring to String. +// +// Deprecated: Use ptr.To instead. +var StringPtr = String // for back-compat + +// StringDeref dereferences the string ptr and returns it if not nil, or else +// returns def. +var StringDeref = ptr.Deref[string] + +// StringPtrDerefOr is a function variable referring to StringDeref. +// +// Deprecated: Use ptr.Deref instead. +var StringPtrDerefOr = StringDeref // for back-compat + +// StringEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +var StringEqual = ptr.Equal[string] + +// Float32 returns a pointer to a float32. +var Float32 = ptr.To[float32] + +// Float32Ptr is a function variable referring to Float32. +// +// Deprecated: Use ptr.To instead. +var Float32Ptr = Float32 + +// Float32Deref dereferences the float32 ptr and returns it if not nil, or else +// returns def. +var Float32Deref = ptr.Deref[float32] + +// Float32PtrDerefOr is a function variable referring to Float32Deref. +// +// Deprecated: Use ptr.Deref instead. +var Float32PtrDerefOr = Float32Deref // for back-compat + +// Float32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Float32Equal = ptr.Equal[float32] + +// Float64 returns a pointer to a float64. +var Float64 = ptr.To[float64] + +// Float64Ptr is a function variable referring to Float64. +// +// Deprecated: Use ptr.To instead. +var Float64Ptr = Float64 + +// Float64Deref dereferences the float64 ptr and returns it if not nil, or else +// returns def. 
+var Float64Deref = ptr.Deref[float64] + +// Float64PtrDerefOr is a function variable referring to Float64Deref. +// +// Deprecated: Use ptr.Deref instead. +var Float64PtrDerefOr = Float64Deref // for back-compat + +// Float64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +var Float64Equal = ptr.Equal[float64] + +// Duration returns a pointer to a time.Duration. +var Duration = ptr.To[time.Duration] + +// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else +// returns def. +var DurationDeref = ptr.Deref[time.Duration] + +// DurationEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +var DurationEqual = ptr.Equal[time.Duration] diff --git a/vendor/k8s.io/utils/ptr/OWNERS b/vendor/k8s.io/utils/ptr/OWNERS new file mode 100644 index 000000000..0d6392752 --- /dev/null +++ b/vendor/k8s.io/utils/ptr/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/ptr/README.md b/vendor/k8s.io/utils/ptr/README.md new file mode 100644 index 000000000..2ca8073dc --- /dev/null +++ b/vendor/k8s.io/utils/ptr/README.md @@ -0,0 +1,3 @@ +# Pointer + +This package provides some functions for pointer-based operations. diff --git a/vendor/k8s.io/utils/ptr/ptr.go b/vendor/k8s.io/utils/ptr/ptr.go new file mode 100644 index 000000000..659ed3b9e --- /dev/null +++ b/vendor/k8s.io/utils/ptr/ptr.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// To returns a pointer to the given value. +func To[T any](v T) *T { + return &v +} + +// Deref dereferences ptr and returns the value it points to if no nil, or else +// returns def. +func Deref[T any](ptr *T, def T) T { + if ptr != nil { + return *ptr + } + return def +} + +// Equal returns true if both arguments are nil or both arguments +// dereference to the same value. 
+func Equal[T comparable](a, b *T) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/k8s.io/utils/strings/slices/slices.go b/vendor/k8s.io/utils/strings/slices/slices.go new file mode 100644 index 000000000..8e21838f2 --- /dev/null +++ b/vendor/k8s.io/utils/strings/slices/slices.go @@ -0,0 +1,82 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slices defines various functions useful with slices of string type. +// The goal is to be as close as possible to +// https://github.com/golang/go/issues/45955. Ideal would be if we can just +// replace "stringslices" if the "slices" package becomes standard. +package slices + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in index order, and the +// comparison stops at the first unequal pair. +func Equal(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + for i, n := range s1 { + if n != s2[i] { + return false + } + } + return true +} + +// Filter appends to d each element e of s for which keep(e) returns true. +// It returns the modified d. d may be s[:0], in which case the kept +// elements will be stored in the same slice. +// if the slices overlap in some other way, the results are unspecified. +// To create a new slice with the filtered results, pass nil for d. +func Filter(d, s []string, keep func(string) bool) []string { + for _, n := range s { + if keep(n) { + d = append(d, n) + } + } + return d +} + +// Contains reports whether v is present in s. +func Contains(s []string, v string) bool { + return Index(s, v) >= 0 +} + +// Index returns the index of the first occurrence of v in s, or -1 if +// not present. +func Index(s []string, v string) int { + // "Contains" may be replaced with "Index(s, v) >= 0": + // https://github.com/golang/go/issues/45955#issuecomment-873377947 + for i, n := range s { + if n == v { + return i + } + } + return -1 +} + +// Functions below are not in https://github.com/golang/go/issues/45955 + +// Clone returns a new clone of s. +func Clone(s []string) []string { + // https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go + if s == nil { + return nil + } + c := make([]string, len(s)) + copy(c, s) + return c +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 3023d1066..559aebb59 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "math/rand" + "sync" "time" "k8s.io/klog/v2" @@ -64,6 +65,11 @@ func durationToMilliseconds(timeDuration time.Duration) int64 { } type traceItem interface { + // rLock must be called before invoking time or writeItem. + rLock() + // rUnlock must be called after processing the item is complete. + rUnlock() + // time returns when the trace was recorded as completed. 
time() time.Time // writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the @@ -78,6 +84,10 @@ type traceStep struct { fields []Field } +// rLock doesn't need to do anything because traceStep instances are immutable. +func (s traceStep) rLock() {} +func (s traceStep) rUnlock() {} + func (s traceStep) time() time.Time { return s.stepTime } @@ -93,13 +103,24 @@ func (s traceStep) writeItem(b *bytes.Buffer, formatter string, startTime time.T // Trace keeps track of a set of "steps" and allows us to log a specific // step if it took longer than its share of the total allowed time type Trace struct { + // constant fields name string fields []Field - threshold *time.Duration startTime time.Time - endTime *time.Time - traceItems []traceItem parentTrace *Trace + // fields guarded by a lock + lock sync.RWMutex + threshold *time.Duration + endTime *time.Time + traceItems []traceItem +} + +func (t *Trace) rLock() { + t.lock.RLock() +} + +func (t *Trace) rUnlock() { + t.lock.RUnlock() } func (t *Trace) time() time.Time { @@ -138,6 +159,8 @@ func New(name string, fields ...Field) *Trace { // how long it took. The Fields add key value pairs to provide additional details about the trace // step. func (t *Trace) Step(msg string, fields ...Field) { + t.lock.Lock() + defer t.lock.Unlock() if t.traceItems == nil { // traces almost always have less than 6 steps, do this to avoid more than a single allocation t.traceItems = make([]traceItem, 0, 6) @@ -153,7 +176,9 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace { newTrace := New(msg, fields...) if t != nil { newTrace.parentTrace = t + t.lock.Lock() t.traceItems = append(t.traceItems, newTrace) + t.lock.Unlock() } return newTrace } @@ -163,9 +188,11 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace { // is logged. func (t *Trace) Log() { endTime := time.Now() + t.lock.Lock() t.endTime = &endTime + t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level - if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace + if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() } } @@ -178,13 +205,17 @@ func (t *Trace) Log() { // If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it // is nested within is logged. func (t *Trace) LogIfLong(threshold time.Duration) { + t.lock.Lock() t.threshold = &threshold + t.lock.Unlock() t.Log() } // logTopLevelTraces finds all traces in a hierarchy of nested traces that should be logged but do not have any // parents that will be logged, due to threshold limits, and logs them as top level traces. 
func (t *Trace) logTrace() { + t.lock.RLock() + defer t.lock.RUnlock() if t.durationIsWithinThreshold() { var buffer bytes.Buffer traceNum := rand.Int31() @@ -217,8 +248,10 @@ func (t *Trace) logTrace() { func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) { lastStepTime := t.startTime for _, stepOrTrace := range t.traceItems { + stepOrTrace.rLock() stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold) lastStepTime = stepOrTrace.time() + stepOrTrace.rUnlock() } } @@ -244,9 +277,13 @@ func (t *Trace) calculateStepThreshold() *time.Duration { traceThreshold := *t.threshold for _, s := range t.traceItems { nestedTrace, ok := s.(*Trace) - if ok && nestedTrace.threshold != nil { - traceThreshold = traceThreshold - *nestedTrace.threshold - lenTrace-- + if ok { + nestedTrace.lock.RLock() + if nestedTrace.threshold != nil { + traceThreshold = traceThreshold - *nestedTrace.threshold + lenTrace-- + } + nestedTrace.lock.RUnlock() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 49e3b8c84..aef662001 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,54 +1,84 @@ -# github.com/davecgh/go-spew v1.1.1 +# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew -# github.com/evanphx/json-patch v5.6.0+incompatible -## explicit -github.com/evanphx/json-patch -# github.com/go-logr/logr v1.2.0 -## explicit; go 1.16 +# github.com/emicklei/go-restful/v3 v3.11.0 +## explicit; go 1.13 +github.com/emicklei/go-restful/v3 +github.com/emicklei/go-restful/v3/log +# github.com/fxamacker/cbor/v2 v2.7.0 +## explicit; go 1.17 +github.com/fxamacker/cbor/v2 +# github.com/go-logr/logr v1.4.2 +## explicit; go 1.18 github.com/go-logr/logr +# github.com/go-openapi/jsonpointer v0.19.6 +## explicit; go 1.13 +github.com/go-openapi/jsonpointer +# github.com/go-openapi/jsonreference v0.20.2 +## explicit; go 1.13 +github.com/go-openapi/jsonreference +github.com/go-openapi/jsonreference/internal +# github.com/go-openapi/swag v0.22.4 +## explicit; go 1.18 +github.com/go-openapi/swag # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/protobuf v1.5.2 -## explicit; go 1.9 +# github.com/golang/protobuf v1.5.4 +## explicit; go 1.17 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-cmp v0.5.6 -## explicit; go 1.8 +# github.com/google/gnostic-models v0.6.8 +## explicit; go 1.18 +github.com/google/gnostic-models/compiler +github.com/google/gnostic-models/extensions +github.com/google/gnostic-models/jsonschema +github.com/google/gnostic-models/openapiv2 +github.com/google/gnostic-models/openapiv3 +# github.com/google/go-cmp v0.6.0 +## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/gofuzz v1.1.0 +# github.com/google/gofuzz v1.2.0 ## explicit; go 1.12 github.com/google/gofuzz -# github.com/googleapis/gnostic v0.5.5 -## explicit; go 1.12 -github.com/googleapis/gnostic/compiler -github.com/googleapis/gnostic/extensions -github.com/googleapis/gnostic/jsonschema -github.com/googleapis/gnostic/openapiv2 -# github.com/imdario/mergo v0.3.5 +github.com/google/gofuzz/bytesource 
+# github.com/google/uuid v1.6.0 +## explicit +github.com/google/uuid +# github.com/imdario/mergo v0.3.6 ## explicit github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 ## explicit github.com/inconshreveable/mousetrap +# github.com/josharian/intern v1.0.0 +## explicit; go 1.5 +github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go +# github.com/mailru/easyjson v0.7.7 +## explicit; go 1.12 +github.com/mailru/easyjson/buffer +github.com/mailru/easyjson/jlexer +github.com/mailru/easyjson/jwriter # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 +# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 +## explicit +github.com/munnerz/goautoneg # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -61,16 +91,17 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# golang.org/x/net v0.23.0 +# github.com/x448/float16 v0.8.4 +## explicit; go 1.11 +github.com/x448/float16 +# golang.org/x/net v0.26.0 ## explicit; go 1.18 -golang.org/x/net/context -golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 -## explicit; go 1.11 +# golang.org/x/oauth2 v0.21.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sys v0.26.0 @@ -87,26 +118,18 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# google.golang.org/appengine v1.6.7 -## explicit; go 1.11 -google.golang.org/appengine/internal -google.golang.org/appengine/internal/base -google.golang.org/appengine/internal/datastore -google.golang.org/appengine/internal/log -google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/urlfetch -# google.golang.org/protobuf v1.33.0 -## explicit; go 1.17 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/editiondefaults +google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag @@ -133,26 +156,31 @@ google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb +# gopkg.in/evanphx/json-patch.v4 v4.12.0 +## explicit +gopkg.in/evanphx/json-patch.v4 # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 -# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 -## explicit # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.23.17 -## explicit; go 1.19 +# k8s.io/api v0.31.3 +## explicit; go 1.22.0 k8s.io/api/admissionregistration/v1 +k8s.io/api/admissionregistration/v1alpha1 
k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apidiscovery/v2 +k8s.io/api/apidiscovery/v2beta1 k8s.io/api/apiserverinternal/v1alpha1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 k8s.io/api/authentication/v1 +k8s.io/api/authentication/v1alpha1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 @@ -163,8 +191,10 @@ k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/certificates/v1 +k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 +k8s.io/api/coordination/v1alpha1 k8s.io/api/coordination/v1beta1 k8s.io/api/core/v1 k8s.io/api/discovery/v1 @@ -172,10 +202,12 @@ k8s.io/api/discovery/v1beta1 k8s.io/api/events/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 -k8s.io/api/flowcontrol/v1alpha1 +k8s.io/api/flowcontrol/v1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 +k8s.io/api/flowcontrol/v1beta3 k8s.io/api/networking/v1 +k8s.io/api/networking/v1alpha1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1 k8s.io/api/node/v1alpha1 @@ -185,20 +217,27 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 +k8s.io/api/resource/v1alpha3 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.23.17 -## explicit; go 1.19 +k8s.io/api/storagemigration/v1alpha1 +# k8s.io/apimachinery v0.31.3 +## explicit; go 1.22.0 +k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/api/meta/testrestmapper k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/internalversion/validation k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams @@ -207,6 +246,8 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct +k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer @@ -216,11 +257,13 @@ k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/dump k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/managedfields +k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net @@ -235,9 +278,10 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.23.17 -## explicit; go 1.19 +# k8s.io/client-go v0.31.3 +## explicit; go 1.22.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 
+k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1 k8s.io/client-go/applyconfigurations/apps/v1 @@ -250,8 +294,10 @@ k8s.io/client-go/applyconfigurations/autoscaling/v2beta2 k8s.io/client-go/applyconfigurations/batch/v1 k8s.io/client-go/applyconfigurations/batch/v1beta1 k8s.io/client-go/applyconfigurations/certificates/v1 +k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 +k8s.io/client-go/applyconfigurations/coordination/v1alpha1 k8s.io/client-go/applyconfigurations/coordination/v1beta1 k8s.io/client-go/applyconfigurations/core/v1 k8s.io/client-go/applyconfigurations/discovery/v1 @@ -259,12 +305,14 @@ k8s.io/client-go/applyconfigurations/discovery/v1beta1 k8s.io/client-go/applyconfigurations/events/v1 k8s.io/client-go/applyconfigurations/events/v1beta1 k8s.io/client-go/applyconfigurations/extensions/v1beta1 -k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1 +k8s.io/client-go/applyconfigurations/flowcontrol/v1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 +k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 k8s.io/client-go/applyconfigurations/networking/v1 +k8s.io/client-go/applyconfigurations/networking/v1alpha1 k8s.io/client-go/applyconfigurations/networking/v1beta1 k8s.io/client-go/applyconfigurations/node/v1 k8s.io/client-go/applyconfigurations/node/v1alpha1 @@ -274,24 +322,30 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 +k8s.io/client-go/applyconfigurations/resource/v1alpha3 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 k8s.io/client-go/applyconfigurations/storage/v1 k8s.io/client-go/applyconfigurations/storage/v1alpha1 k8s.io/client-go/applyconfigurations/storage/v1beta1 +k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1 k8s.io/client-go/discovery k8s.io/client-go/dynamic k8s.io/client-go/dynamic/fake +k8s.io/client-go/features +k8s.io/client-go/gentype k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1 +k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1beta2 k8s.io/client-go/kubernetes/typed/authentication/v1 +k8s.io/client-go/kubernetes/typed/authentication/v1alpha1 k8s.io/client-go/kubernetes/typed/authentication/v1beta1 k8s.io/client-go/kubernetes/typed/authorization/v1 k8s.io/client-go/kubernetes/typed/authorization/v1beta1 @@ -302,8 +356,10 @@ k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/certificates/v1 +k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 
k8s.io/client-go/kubernetes/typed/certificates/v1beta1 k8s.io/client-go/kubernetes/typed/coordination/v1 +k8s.io/client-go/kubernetes/typed/coordination/v1alpha1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/core/v1 k8s.io/client-go/kubernetes/typed/discovery/v1 @@ -311,10 +367,12 @@ k8s.io/client-go/kubernetes/typed/discovery/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3 k8s.io/client-go/kubernetes/typed/networking/v1 +k8s.io/client-go/kubernetes/typed/networking/v1alpha1 k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/kubernetes/typed/node/v1 k8s.io/client-go/kubernetes/typed/node/v1alpha1 @@ -324,16 +382,18 @@ k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 +k8s.io/client-go/kubernetes/typed/resource/v1alpha3 k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1 +k8s.io/client-go/openapi k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install k8s.io/client-go/pkg/apis/clientauthentication/v1 -k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec @@ -342,6 +402,7 @@ k8s.io/client-go/rest/watch k8s.io/client-go/testing k8s.io/client-go/tools/auth k8s.io/client-go/tools/cache +k8s.io/client-go/tools/cache/synctrack k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest @@ -352,36 +413,55 @@ k8s.io/client-go/tools/reference k8s.io/client-go/transport k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation +k8s.io/client-go/util/consistencydetector k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil +k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/klog/v2 v2.30.0 -## explicit; go 1.13 +# k8s.io/klog/v2 v2.130.1 +## explicit; go 1.18 k8s.io/klog/v2 -# k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 -## explicit; go 1.16 +k8s.io/klog/v2/internal/buffer +k8s.io/klog/v2/internal/clock +k8s.io/klog/v2/internal/dbg +k8s.io/klog/v2/internal/serialize +k8s.io/klog/v2/internal/severity +k8s.io/klog/v2/internal/sloghandler +# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +## explicit; go 1.20 +k8s.io/kube-openapi/pkg/cached +k8s.io/kube-openapi/pkg/common +k8s.io/kube-openapi/pkg/handler3 +k8s.io/kube-openapi/pkg/internal +k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json k8s.io/kube-openapi/pkg/schemaconv +k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto -# k8s.io/utils v0.0.0-20211116205334-6203023598ed -## explicit; go 1.12 +k8s.io/kube-openapi/pkg/validation/spec +# 
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 +## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing -k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net +k8s.io/utils/pointer +k8s.io/utils/ptr +k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 -## explicit; go 1.16 +# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd +## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath +sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value -# sigs.k8s.io/yaml v1.2.0 +# sigs.k8s.io/yaml v1.4.0 ## explicit; go 1.12 sigs.k8s.io/yaml +sigs.k8s.io/yaml/goyaml.v2 diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index a047d981b..6a13cf2df 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -75,6 +75,8 @@ import ( // either be any string type, an integer, implement json.Unmarshaler, or // implement encoding.TextUnmarshaler. // +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. @@ -85,15 +87,14 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// -func Unmarshal(data []byte, v interface{}, opts ...UnmarshalOpt) error { +func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // Check for well-formedness. // Avoids filling out half a data structure // before discovering a JSON syntax error. @@ -167,16 +168,16 @@ func (e *InvalidUnmarshalError) Error() string { return "json: Unmarshal(nil)" } - if e.Type.Kind() != reflect.Ptr { + if e.Type.Kind() != reflect.Pointer { return "json: Unmarshal(non-pointer " + e.Type.String() + ")" } return "json: Unmarshal(nil " + e.Type.String() + ")" } */ -func (d *decodeState) unmarshal(v interface{}) error { +func (d *decodeState) unmarshal(v any) error { rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { + if rv.Kind() != reflect.Pointer || rv.IsNil() { return &InvalidUnmarshalError{reflect.TypeOf(v)} } @@ -233,7 +234,8 @@ type decodeState struct { disallowUnknownFields bool savedStrictErrors []error - seenStrictErrors map[string]struct{} + seenStrictErrors map[strictError]struct{} + strictFieldStack []string caseSensitive bool @@ -261,6 +263,8 @@ func (d *decodeState) init(data []byte) *decodeState { // Reuse the allocated space for the FieldStack slice. 
d.errorContext.FieldStack = d.errorContext.FieldStack[:0] } + // Reuse the allocated space for the strict FieldStack slice. + d.strictFieldStack = d.strictFieldStack[:0] return d } @@ -422,7 +426,7 @@ type unquotedValue struct{} // quoted string literal or literal null into an interface value. // If it finds anything other than a quoted string literal or null, // valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { +func (d *decodeState) valueQuoted() any { switch d.opcode { default: panic(phasePanicMsg) @@ -464,7 +468,7 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // If v is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { haveAddr = true v = v.Addr() } @@ -473,14 +477,14 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // usefully addressable. if v.Kind() == reflect.Interface && !v.IsNil() { e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { haveAddr = false v = e continue } } - if v.Kind() != reflect.Ptr { + if v.Kind() != reflect.Pointer { break } @@ -555,6 +559,12 @@ func (d *decodeState) array(v reflect.Value) error { break } + origStrictFieldStackLen := len(d.strictFieldStack) + defer func() { + // Reset to original length and reuse the allocated space for the strict FieldStack slice. + d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen] + }() + i := 0 for { // Look ahead for ] - can only happen on first iteration. @@ -580,6 +590,7 @@ func (d *decodeState) array(v reflect.Value) error { } } + d.appendStrictFieldStackIndex(i) if i < v.Len() { // Decode into element. if err := d.value(v.Index(i)); err != nil { @@ -591,6 +602,8 @@ func (d *decodeState) array(v reflect.Value) error { return err } } + // Reset to original length and reuse the allocated space for the strict FieldStack slice. + d.strictFieldStack = d.strictFieldStack[:origStrictFieldStackLen] i++ // Next token must be , or ]. @@ -666,7 +679,7 @@ func (d *decodeState) object(v reflect.Value) error { reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: default: - if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) d.skip() return nil @@ -683,7 +696,7 @@ func (d *decodeState) object(v reflect.Value) error { seenKeys = map[string]struct{}{} } if _, seen := seenKeys[fieldName]; seen { - d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName)) + d.saveStrictError(d.newFieldError(duplicateStrictErrType, fieldName)) } else { seenKeys[fieldName] = struct{}{} } @@ -699,7 +712,7 @@ func (d *decodeState) object(v reflect.Value) error { var seenKeys uint64 checkDuplicateField = func(fieldNameIndex int, fieldName string) { if seenKeys&(1< startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. 
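// Note on the hunks just below (descriptive comment only, not part of the upstream file):
// the vendored encoder now calls reflect.Value.UnsafePointer (Go 1.18+), which returns an
// unsafe.Pointer, instead of Value.Pointer, which returns a uintptr. The cycle-detection
// map therefore keys on a pointer-shaped value rather than a bare integer, matching the
// reflect.Ptr -> reflect.Pointer kind rename applied throughout this file.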
- ptr := v.Pointer() + ptr := v.UnsafePointer() if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -877,9 +876,9 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { - ptr uintptr + ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe len int - }{v.Pointer(), v.Len()} + }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -893,7 +892,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { func newSliceEncoder(t reflect.Type) encoderFunc { // Byte slices get special treatment; arrays don't. if t.Elem().Kind() == reflect.Uint8 { - p := reflect.PtrTo(t.Elem()) + p := reflect.PointerTo(t.Elem()) if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) { return encodeByteSlice } @@ -989,7 +988,7 @@ func isValidTag(s string) bool { func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } t = t.Field(i).Type @@ -1009,7 +1008,7 @@ func (w *reflectWithString) resolve() error { return nil } if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { - if w.k.Kind() == reflect.Ptr && w.k.IsNil() { + if w.k.Kind() == reflect.Pointer && w.k.IsNil() { return nil } buf, err := tm.MarshalText() @@ -1243,7 +1242,7 @@ func typeFields(t reflect.Type) structFields { sf := f.typ.Field(i) if sf.Anonymous { t := sf.Type - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } if !sf.IsExported() && t.Kind() != reflect.Struct { @@ -1269,7 +1268,7 @@ func typeFields(t reflect.Type) structFields { index[len(f.index)] = i ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { + if ft.Name() == "" && ft.Kind() == reflect.Pointer { // Follow pointer. ft = ft.Elem() } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go index 9e170127d..ab249b2bb 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go @@ -24,8 +24,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See https://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go index d3fa2d111..b8f4ff2c1 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gofuzz -// +build gofuzz package json @@ -12,10 +11,10 @@ import ( ) func Fuzz(data []byte) (score int) { - for _, ctor := range []func() interface{}{ - func() interface{} { return new(interface{}) }, - func() interface{} { return new(map[string]interface{}) }, - func() interface{} { return new([]interface{}) }, + for _, ctor := range []func() any{ + func() any { return new(any) }, + func() any { return new(map[string]any) }, + func() any { return new([]any) }, } { v := ctor() err := Unmarshal(data, v) diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go index 4b7acd6ef..e1c0a74d9 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go @@ -18,6 +18,7 @@ package json import ( gojson "encoding/json" + "strconv" "strings" ) @@ -69,27 +70,85 @@ func (d *Decoder) DisallowDuplicateFields() { d.d.disallowDuplicateFields = true } +func (d *decodeState) newFieldError(errType strictErrType, field string) *strictError { + if len(d.strictFieldStack) > 0 { + return &strictError{ + ErrType: errType, + Path: strings.Join(d.strictFieldStack, "") + "." + field, + } + } else { + return &strictError{ + ErrType: errType, + Path: field, + } + } +} + // saveStrictError saves a strict decoding error, // for reporting at the end of the unmarshal if no other errors occurred. -func (d *decodeState) saveStrictError(err error) { +func (d *decodeState) saveStrictError(err *strictError) { // prevent excessive numbers of accumulated errors if len(d.savedStrictErrors) >= 100 { return } // dedupe accumulated strict errors if d.seenStrictErrors == nil { - d.seenStrictErrors = map[string]struct{}{} + d.seenStrictErrors = map[strictError]struct{}{} } - msg := err.Error() - if _, seen := d.seenStrictErrors[msg]; seen { + if _, seen := d.seenStrictErrors[*err]; seen { return } // accumulate the error - d.seenStrictErrors[msg] = struct{}{} + d.seenStrictErrors[*err] = struct{}{} d.savedStrictErrors = append(d.savedStrictErrors, err) } +func (d *decodeState) appendStrictFieldStackKey(key string) { + if !d.disallowDuplicateFields && !d.disallowUnknownFields { + return + } + if len(d.strictFieldStack) > 0 { + d.strictFieldStack = append(d.strictFieldStack, ".", key) + } else { + d.strictFieldStack = append(d.strictFieldStack, key) + } +} + +func (d *decodeState) appendStrictFieldStackIndex(i int) { + if !d.disallowDuplicateFields && !d.disallowUnknownFields { + return + } + d.strictFieldStack = append(d.strictFieldStack, "[", strconv.Itoa(i), "]") +} + +type strictErrType string + +const ( + unknownStrictErrType strictErrType = "unknown field" + duplicateStrictErrType strictErrType = "duplicate field" +) + +// strictError is a strict decoding error +// It has an ErrType (either unknown or duplicate) +// and a path to the erroneous field +type strictError struct { + ErrType strictErrType + Path string +} + +func (e *strictError) Error() string { + return string(e.ErrType) + " " + strconv.Quote(e.Path) +} + +func (e *strictError) FieldPath() string { + return e.Path +} + +func (e *strictError) SetFieldPath(path string) { + e.Path = path +} + // UnmarshalStrictError holds errors resulting from use of strict disallow___ decoder directives. // If this is returned from Unmarshal(), it means the decoding was successful in all other respects. 
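// Illustrative sketch of what the strictError machinery above produces (a sketch, not part
// of the upstream file). With the field stack recording keys as "." + key and list indexes
// as "[i]", a duplicate key nested under a list item yields an error such as:
//
//	e := &strictError{ErrType: duplicateStrictErrType, Path: "spec.containers[0].name"}
//	e.Error()     // duplicate field "spec.containers[0].name"
//	e.FieldPath() // spec.containers[0].name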
type UnmarshalStrictError struct { diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go index 9dc1903e2..22fc6922d 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go @@ -27,6 +27,7 @@ func Valid(data []byte) bool { // checkValid verifies that data is valid JSON-encoded data. // scan is passed in for use by checkValid to avoid an allocation. +// checkValid returns nil or a SyntaxError. func checkValid(data []byte, scan *scanner) error { scan.reset() for _, c := range data { @@ -42,6 +43,7 @@ func checkValid(data []byte, scan *scanner) error { } // A SyntaxError is a description of a JSON syntax error. +// Unmarshal will return a SyntaxError if the JSON can't be parsed. type SyntaxError struct { msg string // description of error Offset int64 // error occurred after reading Offset bytes @@ -83,7 +85,7 @@ type scanner struct { } var scannerPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &scanner{} }, } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 5f87df1c6..1967755ac 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -45,7 +45,7 @@ func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true // // See the documentation for Unmarshal for details about // the conversion of JSON into a Go value. -func (dec *Decoder) Decode(v interface{}) error { +func (dec *Decoder) Decode(v any) error { if dec.err != nil { return dec.err } @@ -197,7 +197,7 @@ func NewEncoder(w io.Writer) *Encoder { // // See the documentation for Marshal for details about the // conversion of Go values to JSON. -func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { if enc.err != nil { return enc.err } @@ -289,8 +289,7 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// -type Token interface{} +type Token any */ const ( @@ -457,7 +456,7 @@ func (dec *Decoder) Token() (Token, error) { if !dec.tokenValueAllowed() { return dec.tokenError(c) } - var x interface{} + var x any if err := dec.Decode(&x); err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go index c38fd5102..b490328f4 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go @@ -15,10 +15,8 @@ type tagOptions string // parseTag splits a struct field's json tag into its name and // comma-separated options. 
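// The parseTag/Contains rewrite just below drops the manual strings.Index slicing in
// favour of strings.Cut (Go 1.18+). A minimal sketch of Cut's behaviour, assuming a
// typical struct tag value:
//
//	name, opts, found := strings.Cut("name,omitempty,string", ",")
//	// name == "name", opts == "omitempty,string", found == true
//	// with no separator present, Cut returns the whole string, "", false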
func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") + tag, opt, _ := strings.Cut(tag, ",") + return tag, tagOptions(opt) } // Contains reports whether a comma-separated list of options @@ -30,15 +28,11 @@ func (o tagOptions) Contains(optionName string) bool { } s := string(o) for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { + var name string + name, s, _ = strings.Cut(s, ",") + if name == optionName { return true } - s = next } return false } diff --git a/vendor/sigs.k8s.io/json/json.go b/vendor/sigs.k8s.io/json/json.go index 764e2a84c..e8f31b16c 100644 --- a/vendor/sigs.k8s.io/json/json.go +++ b/vendor/sigs.k8s.io/json/json.go @@ -34,13 +34,13 @@ type Decoder interface { } // NewDecoderCaseSensitivePreserveInts returns a decoder that matches the behavior of encoding/json#NewDecoder, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. +// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { d := internaljson.NewDecoder(r) d.CaseSensitive() @@ -51,13 +51,13 @@ func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { // UnmarshalCaseSensitivePreserveInts parses the JSON-encoded data and stores the result in the value pointed to by v. // // UnmarshalCaseSensitivePreserveInts matches the behavior of encoding/json#Unmarshal, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. 
+// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error { return internaljson.Unmarshal( data, @@ -84,6 +84,8 @@ const ( // and a list of the strict failures (if any) are returned. If no `strictOptions` are selected, // all supported strict checks are performed. // +// Strict errors returned will implement the FieldError interface for the specific erroneous fields. +// // Currently supported strict checks are: // - DisallowDuplicateFields: ensure the data contains no duplicate fields // - DisallowUnknownFields: ensure the data contains no unknown fields (when decoding into typed structs) @@ -137,3 +139,12 @@ func SyntaxErrorOffset(err error) (isSyntaxError bool, offset int64) { return false, 0 } } + +// FieldError is an error that provides access to the path of the erroneous field +type FieldError interface { + error + // FieldPath provides the full path of the erroneous field within the json object. + FieldPath() string + // SetFieldPath updates the path of the erroneous field output in the error message. + SetFieldPath(path string) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go index 9b14ca581..41fc2474a 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -28,20 +28,15 @@ import ( // for PathElementSet and SetNodeMap, so we could probably share the // code. type PathElementValueMap struct { - members sortedPathElementValues + valueMap PathElementMap } func MakePathElementValueMap(size int) PathElementValueMap { return PathElementValueMap{ - members: make(sortedPathElementValues, 0, size), + valueMap: MakePathElementMap(size), } } -type pathElementValue struct { - PathElement PathElement - Value value.Value -} - type sortedPathElementValues []pathElementValue // Implement the sort interface; this would permit bulk creation, which would @@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool { func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } // Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + s.valueMap.Insert(pe, v) +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. +func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + v, ok := s.valueMap.Get(pe) + if !ok { + return nil, false + } + return v.(value.Value), true +} + +// PathElementValueMap is a map from PathElement to interface{}. 
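// Illustrative consumer of the FieldError interface introduced in sigs.k8s.io/json above
// (a sketch, not vendored code). It assumes the package's UnmarshalStrict entry point,
// which is outside this hunk, with the shape
// UnmarshalStrict(data []byte, v interface{}, opts ...StrictOption) ([]error, error),
// and sigsjson as an import alias for sigs.k8s.io/json:
//
//	strictErrs, err := sigsjson.UnmarshalStrict(data, &obj)
//	if err != nil {
//		return err
//	}
//	for _, se := range strictErrs {
//		if fe, ok := se.(sigsjson.FieldError); ok {
//			log.Printf("strict decoding error at %q: %v", fe.FieldPath(), se)
//		}
//	}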
+type PathElementMap struct { + members sortedPathElementValues +} + +type pathElementValue struct { + PathElement PathElement + Value interface{} +} + +func MakePathElementMap(size int) PathElementMap { + return PathElementMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +// Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. +func (s *PathElementMap) Insert(pe PathElement, v interface{}) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) @@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { return } if s.members[loc].PathElement.Equals(pe) { + s.members[loc].Value = v return } s.members = append(s.members, pathElementValue{}) @@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { // Get retrieves the value associated with the given PathElement from the map. // (nil, false) is returned if there is no such PathElement. -func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { +func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go new file mode 100644 index 000000000..f1aa25860 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + "sort" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Conflict is a conflict on a specific field with the current manager of +// that field. It does implement the error interface so that it can be +// used as an error. +type Conflict struct { + Manager string + Path fieldpath.Path +} + +// Conflict is an error. +var _ error = Conflict{} + +// Error formats the conflict as an error. +func (c Conflict) Error() string { + return fmt.Sprintf("conflict with %q: %v", c.Manager, c.Path) +} + +// Equals returns true if c == c2 +func (c Conflict) Equals(c2 Conflict) bool { + if c.Manager != c2.Manager { + return false + } + return c.Path.Equals(c2.Path) +} + +// Conflicts accumulates multiple conflicts and aggregates them by managers. +type Conflicts []Conflict + +var _ error = Conflicts{} + +// Error prints the list of conflicts, grouped by sorted managers. +func (conflicts Conflicts) Error() string { + if len(conflicts) == 1 { + return conflicts[0].Error() + } + + m := map[string][]fieldpath.Path{} + for _, conflict := range conflicts { + m[conflict.Manager] = append(m[conflict.Manager], conflict.Path) + } + + managers := []string{} + for manager := range m { + managers = append(managers, manager) + } + + // Print conflicts by sorted managers. 
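// Illustrative output of the Conflict/Conflicts types above (a sketch, not vendored code;
// fieldpath.MakePathOrDie is assumed from the fieldpath package, outside this hunk):
//
//	cs := Conflicts{
//		{Manager: "kubectl", Path: fieldpath.MakePathOrDie("spec", "replicas")},
//		{Manager: "kubectl", Path: fieldpath.MakePathOrDie("metadata", "labels", "app")},
//	}
//	fmt.Println(cs.Error())
//	// output is roughly:
//	// conflicts with "kubectl":
//	// - .spec.replicas
//	// - .metadata.labels.app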
+ sort.Strings(managers) + + messages := []string{} + for _, manager := range managers { + messages = append(messages, fmt.Sprintf("conflicts with %q:", manager)) + for _, path := range m[manager] { + messages = append(messages, fmt.Sprintf("- %v", path)) + } + } + return strings.Join(messages, "\n") +} + +// Equals returns true if the lists of conflicts are the same. +func (c Conflicts) Equals(c2 Conflicts) bool { + if len(c) != len(c2) { + return false + } + for i := range c { + if !c[i].Equals(c2[i]) { + return false + } + } + return true +} + +// ToSet aggregates conflicts for all managers into a single Set. +func (c Conflicts) ToSet() *fieldpath.Set { + set := fieldpath.NewSet() + for _, conflict := range []Conflict(c) { + set.Insert(conflict.Path) + } + return set +} + +// ConflictsFromManagers creates a list of conflicts given Managers sets. +func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { + conflicts := []Conflict{} + + for manager, set := range sets { + set.Set().Iterate(func(p fieldpath.Path) { + conflicts = append(conflicts, Conflict{ + Manager: manager, + Path: p.Copy(), + }) + }) + } + + return conflicts +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go new file mode 100644 index 000000000..d5a977d60 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -0,0 +1,358 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package merge + +import ( + "fmt" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Converter is an interface to the conversion logic. The converter +// needs to be able to convert objects from one version to another. +type Converter interface { + Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) + IsMissingVersionError(error) bool +} + +// UpdateBuilder allows you to create a new Updater by exposing all of +// the options and setting them once. +type UpdaterBuilder struct { + Converter Converter + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + // Stop comparing the new object with old object after applying. + // This was initially used to avoid spurious etcd update, but + // since that's vastly inefficient, we've come-up with a better + // way of doing that. Create this flag to stop it. + // Comparing has become more expensive too now that we're not using + // `Compare` but `value.Equals` so this gives an option to avoid it. + ReturnInputOnNoop bool +} + +func (u *UpdaterBuilder) BuildUpdater() *Updater { + return &Updater{ + Converter: u.Converter, + IgnoredFields: u.IgnoredFields, + returnInputOnNoop: u.ReturnInputOnNoop, + } +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + // Deprecated: This will eventually become private. 
+ Converter Converter + + // Deprecated: This will eventually become private. + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + returnInputOnNoop bool +} + +func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { + conflicts := fieldpath.ManagedFields{} + removed := fieldpath.ManagedFields{} + compare, err := oldObject.Compare(newObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + + versions := map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + + for manager, managerSet := range managers { + if manager == workflow { + continue + } + compare, ok := versions[managerSet.APIVersion()] + if !ok { + var err error + versionedOldObject, err := s.Converter.Convert(oldObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert old object: %v", err) + } + versionedNewObject, err := s.Converter.Convert(newObject, managerSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + delete(managers, manager) + continue + } + return nil, nil, fmt.Errorf("failed to convert new object: %v", err) + } + compare, err = versionedOldObject.Compare(versionedNewObject) + if err != nil { + return nil, nil, fmt.Errorf("failed to compare objects: %v", err) + } + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } + + conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) + if !conflictSet.Empty() { + conflicts[manager] = fieldpath.NewVersionedSet(conflictSet, managerSet.APIVersion(), false) + } + + if !compare.Removed.Empty() { + removed[manager] = fieldpath.NewVersionedSet(compare.Removed, managerSet.APIVersion(), false) + } + } + + if !force && len(conflicts) != 0 { + return nil, nil, ConflictsFromManagers(conflicts) + } + + for manager, conflictSet := range conflicts { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(conflictSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager, removedSet := range removed { + managers[manager] = fieldpath.NewVersionedSet(managers[manager].Set().Difference(removedSet.Set()), managers[manager].APIVersion(), managers[manager].Applied()) + } + + for manager := range managers { + if managers[manager].Set().Empty() { + delete(managers, manager) + } + } + + return managers, compare, nil +} + +// Update is the method you should call once you've merged your final +// object on CREATE/UPDATE/PATCH verbs. newObject must be the object +// that you intend to persist (after applying the patch if this is for a +// PATCH call), and liveObject must be the original object (empty if +// this is a CREATE call). 
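// Usage sketch for the Update method declared just below, wired through the UpdaterBuilder
// above (illustrative only; the Converter implementation and the typed values are assumed
// to be supplied by the caller):
//
//	updater := (&UpdaterBuilder{
//		Converter:     conv, // caller-provided implementation of the Converter interface
//		IgnoredFields: map[fieldpath.APIVersion]*fieldpath.Set{},
//	}).BuildUpdater()
//	newObj, managed, err := updater.Update(live, updated, fieldpath.APIVersion("v1"), managed, "my-controller")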
+func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if _, ok := managers[manager]; !ok { + managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) + } + + ignored := s.IgnoredFields[version] + if ignored == nil { + ignored = fieldpath.NewSet() + } + managers[manager] = fieldpath.NewVersionedSet( + managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), + version, + false, + ) + if managers[manager].Set().Empty() { + delete(managers, manager) + } + return newObject, managers, nil +} + +// Apply should be called when Apply is run, given the current object as +// well as the configuration that is applied. This will merge the object +// and return it. +func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { + var err error + managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + newObject, err := liveObject.Merge(configObject) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) + } + lastSet := managers[manager] + set, err := configObject.ToFieldSet() + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) + } + + ignored := s.IgnoredFields[version] + if ignored != nil { + set = set.RecursiveDifference(ignored) + // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? 
+ if lastSet != nil { + lastSet.Set().RecursiveDifference(ignored) + } + } + managers[manager] = fieldpath.NewVersionedSet(set, version, true) + newObject, err = s.prune(newObject, managers, manager, lastSet) + if err != nil { + return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) + } + managers, _, err = s.update(liveObject, newObject, version, managers, manager, force) + if err != nil { + return nil, fieldpath.ManagedFields{}, err + } + if !s.returnInputOnNoop && value.EqualsUsing(value.NewFreelistAllocator(), liveObject.AsValue(), newObject.AsValue()) { + newObject = nil + } + return newObject, managers, nil +} + +// prune will remove a field, list or map item, iff: +// * applyingManager applied it last time +// * applyingManager didn't apply it this time +// * no other applier claims to manage it +func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFields, applyingManager string, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + if lastSet == nil || lastSet.Set().Empty() { + return merged, nil + } + version := lastSet.APIVersion() + convertedMerged, err := s.Converter.Convert(merged, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert merged object to last applied version: %v", err) + } + + sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() + pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, version, managers, applyingManager) + if err != nil { + return nil, fmt.Errorf("failed add back owned items: %v", err) + } + pruned, err = s.addBackDanglingItems(convertedMerged, pruned, lastSet) + if err != nil { + return nil, fmt.Errorf("failed add back dangling items: %v", err) + } + return s.Converter.Convert(pruned, managers[applyingManager].APIVersion()) +} + +// addBackOwnedItems adds back any fields, list and map items that were removed by prune, +// but other appliers or updaters (or the current applier's new config) claim to own. +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, prunedVersion fieldpath.APIVersion, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { + var err error + managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} + for _, managerSet := range managedFields { + if _, ok := managedAtVersion[managerSet.APIVersion()]; !ok { + managedAtVersion[managerSet.APIVersion()] = fieldpath.NewSet() + } + managedAtVersion[managerSet.APIVersion()] = managedAtVersion[managerSet.APIVersion()].Union(managerSet.Set()) + } + // Add back owned items at pruned version first to avoid conversion failure + // caused by pruned fields which are required for conversion. + if managed, ok := managedAtVersion[prunedVersion]; ok { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) + if err != nil { + return nil, err + } + delete(managedAtVersion, prunedVersion) + } + for version, managed := range managedAtVersion { + merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, version, managed) + if err != nil { + return nil, err + } + } + return pruned, nil +} + +// addBackOwnedItemsForVersion adds back any fields, list and map items that were removed by prune with specific managed field path at a version. +// It is an extracted sub-function from addBackOwnedItems for code reuse. 
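// Usage sketch for the Apply flow above (illustrative only): Apply merges the applied
// configuration into the live object, prunes fields the applier no longer sets, and
// reports conflicts with other managers unless force is true:
//
//	newObj, managed, err := updater.Apply(live, applyConfig, fieldpath.APIVersion("v1"), managed, "kubectl", false)
//	if conflicts, ok := err.(Conflicts); ok {
//		// another manager owns one of the applied fields; re-applying with force=true takes ownership
//		_ = conflicts
//	}
//	// newObj == nil signals a no-op apply, unless ReturnInputOnNoop was set on the builder.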
+func (s *Updater) addBackOwnedItemsForVersion(merged, pruned *typed.TypedValue, version fieldpath.APIVersion, managed *fieldpath.Set) (*typed.TypedValue, *typed.TypedValue, error) { + var err error + merged, err = s.Converter.Convert(merged, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert merged object at version %v: %v", version, err) + } + pruned, err = s.Converter.Convert(pruned, version) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, pruned, nil + } + return nil, nil, fmt.Errorf("failed to convert pruned object at version %v: %v", version, err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from merged object at version %v: %v", version, err) + } + prunedSet, err := pruned.ToFieldSet() + if err != nil { + return nil, nil, fmt.Errorf("failed to create field set from pruned object at version %v: %v", version, err) + } + sc, tr := merged.Schema(), merged.TypeRef() + pruned = merged.RemoveItems(mergedSet.EnsureNamedFieldsAreMembers(sc, tr).Difference(prunedSet.EnsureNamedFieldsAreMembers(sc, tr).Union(managed.EnsureNamedFieldsAreMembers(sc, tr)))) + return merged, pruned, nil +} + +// addBackDanglingItems makes sure that the fields list and map items removed by prune were +// previously owned by the currently applying manager. This will add back fields list and map items +// that are unowned or that are owned by Updaters and shouldn't be removed. +func (s *Updater) addBackDanglingItems(merged, pruned *typed.TypedValue, lastSet fieldpath.VersionedSet) (*typed.TypedValue, error) { + convertedPruned, err := s.Converter.Convert(pruned, lastSet.APIVersion()) + if err != nil { + if s.Converter.IsMissingVersionError(err) { + return merged, nil + } + return nil, fmt.Errorf("failed to convert pruned object to last applied version: %v", err) + } + prunedSet, err := convertedPruned.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from pruned object in last applied version: %v", err) + } + mergedSet, err := merged.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create field set from merged object in last applied version: %v", err) + } + sc, tr := merged.Schema(), merged.TypeRef() + prunedSet = prunedSet.EnsureNamedFieldsAreMembers(sc, tr) + mergedSet = mergedSet.EnsureNamedFieldsAreMembers(sc, tr) + last := lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr) + return merged.RemoveItems(mergedSet.Difference(prunedSet).Intersection(last)), nil +} + +// reconcileManagedFieldsWithSchemaChanges reconciles the managed fields with any changes to the +// object's schema since the managed fields were written. 
+// +// Supports: +// - changing types from atomic to granular +// - changing types from granular to atomic +func (s *Updater) reconcileManagedFieldsWithSchemaChanges(liveObject *typed.TypedValue, managers fieldpath.ManagedFields) (fieldpath.ManagedFields, error) { + result := fieldpath.ManagedFields{} + for manager, versionedSet := range managers { + tv, err := s.Converter.Convert(liveObject, versionedSet.APIVersion()) + if s.Converter.IsMissingVersionError(err) { // okay to skip, obsolete versions will be deleted automatically anyway + continue + } + if err != nil { + return nil, err + } + reconciled, err := typed.ReconcileFieldSetWithSchema(versionedSet.Set(), tv) + if err != nil { + return nil, err + } + if reconciled != nil { + result[manager] = fieldpath.NewVersionedSet(reconciled, versionedSet.APIVersion(), versionedSet.Applied()) + } else { + result[manager] = versionedSet + } + } + return result, nil +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 7e5dc7582..5d3707a5b 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -73,7 +73,7 @@ type Atom struct { } // Scalar (AKA "primitive") represents a type which has a single value which is -// either numeric, string, or boolean. +// either numeric, string, or boolean, or untyped for any of them. // // TODO: split numeric into float/int? Something even more fine-grained? type Scalar string @@ -82,6 +82,7 @@ const ( Numeric = Scalar("numeric") String = Scalar("string") Boolean = Scalar("boolean") + Untyped = Scalar("untyped") ) // ElementRelationship is an enum of the different possible relationships diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index 7d64d1308..6eb6c36df 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -110,7 +110,7 @@ var SchemaSchemaYAML = `types: scalar: string - name: deduceInvalidDiscriminator type: - scalar: bool + scalar: boolean - name: fields type: list: @@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types: list: elementType: scalar: string + elementRelationship: atomic - name: untyped map: fields: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go new file mode 100644 index 000000000..ed483cbbc --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -0,0 +1,460 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Comparison is the return value of a TypedValue.Compare() operation. 
+// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). +func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. +func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} + +type compareWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are comparing + path fieldpath.Path + + // Resulting comparison. + comparison *Comparison + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*compareWalker + + allocator value.Allocator +} + +// compare compares stuff. +func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. + return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf { + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *compareWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. 
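// Usage sketch for the Comparison type above (illustrative only; both operands are
// *typed.TypedValue built elsewhere):
//
//	cmp, err := liveTyped.Compare(updatedTyped)
//	if err != nil {
//		return err
//	}
//	cmp = cmp.ExcludeFields(ignored) // drop ignored paths from Added/Modified/Removed
//	if cmp.IsSame() {
//		// no effective change between the two objects
//	}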
+ if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + w.comparison.Modified.Insert(w.path) + } +} + +func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) + } + + // All scalars are leaf fields. + w.doLeaf() + + return nil +} + +func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*compareWalker{} + } + var w2 *compareWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &compareWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.comparison = cmp + return w2 +} + +func (w *compareWalker) finishDescent(w2 *compareWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. 
+ continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. + return + // Normal use-case: + // We have no duplicates for this PE, compare items one-to-one. + case len(lList) <= 1 && len(rList) <= 1: + lValue := value.Value(nil) + if len(lList) != 0 { + lValue = lList[0] + } + rValue := value.Value(nil) + if len(rList) != 0 { + rValue = rList[0] + } + errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...) + // Duplicates before & after use-case: + // Compare the duplicates lists as if they were atomic, mark modified if they changed. + case len(lList) >= 2 && len(rList) >= 2: + listEqual := func(lList, rList []value.Value) bool { + if len(lList) != len(rList) { + return false + } + for i := range lList { + if !value.Equals(lList[i], rList[i]) { + return false + } + } + return true + } + if !listEqual(lList, rList) { + w.comparison.Modified.Insert(append(w.path, pe)) + } + // Duplicates before & not anymore use-case: + // Rcursively add new non-duplicate items, Remove duplicate marker, + case len(lList) >= 2: + if len(rList) != 0 { + errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...) + } + w.comparison.Removed.Insert(append(w.path, pe)) + // New duplicates use-case: + // Recursively remove old non-duplicate items, add duplicate marker. + case len(rList) >= 2: + if len(lList) != 0 { + errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...) + } + w.comparison.Added.Insert(append(w.path, pe)) + } + } + + return +} + +func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { + var errs ValidationErrors + length := 0 + if list != nil { + length = list.Length() + } + observed := fieldpath.MakePathElementValueMap(length) + pes := make([]fieldpath.PathElement, 0, length) + for i := 0; i < length; i++ { + child := list.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + // Ignore repeated occurences of `pe`. 
+ if _, found := observed.Get(pe); found { + continue + } + observed.Insert(pe, child) + pes = append(pes, pe) + } + return pes, observed, errs +} + +func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors { + w2 := w.prepareDescent(pe, t.ElementType, w.comparison) + w2.lhs = lChild + w2.rhs = rChild + errs := w2.compare(pe.String) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType, w.comparison) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.compare(pe.String)...) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) 
+ + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 19c77334f..78fdb0e75 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str return field.Default, nil } -func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // null entries are illegal. @@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, return pe, nil } -func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.IsMap(): @@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { - if list.ElementRelationship == schema.Associative { - if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(a, s, list, index, child) - } +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship != schema.Associative { + return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list") + } - // If there's no keys, then we must be a set of primitives. - return setItemToPathElement(list, index, child) + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, child) } - // Use the index as a key for atomic lists. - return fieldpath.PathElement{Index: &index}, nil + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(child) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 913644083..fa227ac40 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -113,11 +113,12 @@ func (w *mergingWalker) doLeaf() { w.rule(w) } -func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { - errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) - errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) - if len(errs) > 0 { - return errs +func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) } // All scalars are leaf fields. @@ -179,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } out := make([]interface{}, 0, outLen) - rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false) errs = append(errs, rhsErrs...) 
- lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true) errs = append(errs, lhsErrs...) + if len(errs) != 0 { + return errs + } + sharedOrder := make([]*fieldpath.PathElement, 0, rLen) - for i := range rhsOrder { - pe := &rhsOrder[i] + for i := range rhsPEs { + pe := &rhsPEs[i] if _, ok := observedLHS.Get(*pe); ok { sharedOrder = append(sharedOrder, pe) } @@ -198,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err sharedOrder = sharedOrder[1:] } - lLen, rLen = len(lhsOrder), len(rhsOrder) + mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs)) + lLen, rLen = len(lhsPEs), len(rhsPEs) for lI, rI := 0, 0; lI < lLen || rI < rLen; { if lI < lLen && rI < rLen { - pe := lhsOrder[lI] - if pe.Equals(rhsOrder[rI]) { + pe := lhsPEs[lI] + if pe.Equals(rhsPEs[rI]) { // merge LHS & RHS items - lChild, _ := observedLHS.Get(pe) + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -221,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } continue } - if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) { + if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) { // shared item, but not the one we want in this round lI++ continue } } if lI < lLen { - pe := lhsOrder[lI] + pe := lhsPEs[lI] if _, ok := observedRHS.Get(pe); !ok { - // take LHS item - lChild, _ := observedLHS.Get(pe) + // take LHS item using At to make sure we get the right item (observed may not contain the right item). + lChild := lhs.AtUsing(w.allocator, lI) mergeOut, errs := w.mergeListItem(t, pe, lChild, nil) errs = append(errs, errs...) if mergeOut != nil { @@ -239,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } lI++ continue + } else if _, ok := mergedRHS.Get(pe); ok { + // we've already merged it with RHS, we don't want to duplicate it, skip it. + lI++ } } if rI < rLen { // Take the RHS item, merge with matching LHS item if possible - pe := rhsOrder[rI] - lChild, _ := observedLHS.Get(pe) // may be nil + pe := rhsPEs[rI] + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -271,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } -func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { +func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { var errs ValidationErrors length := 0 if list != nil { @@ -281,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( pes := make([]fieldpath.PathElement, 0, length) for i := 0; i < length; i++ { child := list.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) 
// If we can't construct the path element, we can't @@ -289,11 +300,15 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( // this element. continue } - if _, found := observed.Get(pe); found { + if _, found := observed.Get(pe); found && !allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) continue + } else if !found { + observed.Insert(pe, child) + } else { + // Duplicated items are not merged with the new value, make them nil. + observed.Insert(pe, value.NewValueInterface(nil)) } - observed.Insert(pe, child) pes = append(pes, pe) } return pes, observed, errs diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 3949a78fc..4258ee5ba 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool { // FromYAML parses a yaml string into an object with the current schema // and the type "typename" or an error if validation fails. -func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { +func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) { var v interface{} err := yaml.Unmarshal([]byte(object), &v) if err != nil { return nil, err } - return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...) } // FromUnstructured converts a go "interface{}" type, typically an @@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { // The provided interface{} must be one of: map[string]interface{}, // map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. -func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { - return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...) } // FromStructured converts a go "interface{}" type, typically an structured object in @@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { // schema validation. The provided "interface{}" value must be a pointer so that the // value can be modified via reflection. The provided "interface{}" may contain structs // and types that are converted to Values by the jsonMarshaler interface. -func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { +func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { v, err := value.NewValueReflect(in) if err != nil { return nil, fmt.Errorf("error creating struct value reflector: %v", err) } - return AsTyped(v, p.Schema, p.TypeRef) + return AsTyped(v, p.Schema, p.TypeRef, opts...) 
} // DeducedParseableType is a ParseableType that deduces the type from diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index a338d761d..ad071ee8f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { - i, item := iter.Item() + _, item := iter.Item() // Ignore error because we have already validated this list - pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + pe, _ := listItemToPathElement(w.allocator, w.schema, t, item) path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e. !w.shouldExtract) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go index 047efff05..d563a87ee 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + // Keeps track of the PEs we've seen + seen := fieldpath.MakePathElementSet(list.Length()) + // Keeps tracks of the PEs we've counted as duplicates + duplicates := fieldpath.MakePathElementSet(list.Length()) for i := 0; i < list.Length(); i++ { child := list.At(i) - pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if seen.Has(pe) { + if duplicates.Has(pe) { + // do nothing + } else { + v.set.Insert(append(v.path, pe)) + duplicates.Insert(pe) + } + } else { + seen.Insert(pe) + } + } + + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if duplicates.Has(pe) { + continue + } v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.toFieldSet()...) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index d63a97fe2..9be902828 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -17,8 +17,6 @@ limitations under the License. package typed import ( - "fmt" - "strings" "sync" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -26,16 +24,24 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/value" ) +// ValidationOptions is the list of all the options available when running the validation. +type ValidationOptions int + +const ( + // AllowDuplicates means that sets and associative lists can have duplicate similar items. + AllowDuplicates ValidationOptions = iota +) + // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have // type 'typeName' in the schema. An error is returned if the v doesn't conform // to the schema. 
-func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { tv := &TypedValue{ value: v, typeRef: typeRef, schema: s, } - if err := tv.Validate(); err != nil { + if err := tv.Validate(opts...); err != nil { return nil, err } return tv, nil @@ -45,6 +51,10 @@ func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedVal // conforms to the schema, for cases where that has already been checked or // where you're going to call a method that validates as a side-effect (like // ToFieldSet). +// +// Deprecated: This function was initially created because validation +// was expensive. Now that this has been solved, objects should always +// be created as validated, using `AsTyped`. func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { tv := &TypedValue{ value: v, @@ -77,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema { } // Validate returns an error with a list of every spec violation. -func (tv TypedValue) Validate() error { +func (tv TypedValue) Validate(opts ...ValidationOptions) error { w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } defer w.finished() if errs := w.validate(nil); len(errs) != 0 { return errs @@ -113,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { return merge(&tv, pso, ruleKeepRHS, nil) } +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + // Compare compares the two objects. See the comments on the `Comparison` // struct for details on the return value. // @@ -120,33 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { - c = &Comparison{ + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ Removed: fieldpath.NewSet(), Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - _, err = merge(&tv, rhs, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } else if !value.Equals(w.rhs, w.lhs) { - // TODO: Equality is not sufficient for this. - // Need to implement equality check on the value type. - c.Modified.Insert(w.path) - } - }, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } - }) - if err != nil { - return nil, err + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() } - return c, nil + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil } // RemoveItems removes each provided list or map item from the value. 
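Reviewer note (not part of the patch): a minimal, hypothetical sketch of how the API surface pulled in by this vendor bump fits together, using only symbols visible in the hunks above — typed.DeducedParseableType, the new ValidationOptions variadic on ParseableType.FromYAML, typed.AllowDuplicates, TypedValue.Compare, and Comparison.IsSame/String. Object contents and field names are made up for illustration.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

func main() {
	// Parse two versions of the same schemaless object. AllowDuplicates is the
	// option added in this bump; it only affects sets and associative lists and
	// is passed here purely to show the new variadic signature.
	lhs, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 1}}`)
	if err != nil {
		panic(err)
	}
	rhs, err := typed.DeducedParseableType.FromYAML(`{"spec": {"replicas": 2}}`, typed.AllowDuplicates)
	if err != nil {
		panic(err)
	}

	// Compare now runs the dedicated compareWalker (pooled via cmpwPool above)
	// instead of the old merge-based callback implementation.
	cmp, err := lhs.Compare(rhs)
	if err != nil {
		panic(err)
	}
	if !cmp.IsSame() {
		fmt.Print(cmp.String()) // prints the Added/Modified/Removed field paths
	}
}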
@@ -161,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { return &tv } -// NormalizeUnions takes the new object and normalizes the union: -// - If discriminator changed to non-nil, and a new field has been added -// that doesn't match, an error is returned, -// - If discriminator hasn't changed and two fields or more are set, an -// error is returned, -// - If discriminator changed to non-nil, all other fields but the -// discriminated one will be cleared, -// - Otherwise, If only one field is left, update discriminator to that value. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnions(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - -// NormalizeUnionsApply specifically normalize unions on apply. It -// validates that the applied union is correct (there should be no -// ambiguity there), and clear the fields according to the sent intent. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnionsApply(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - func (tv TypedValue) Empty() *TypedValue { tv.value = value.NewValueInterface(nil) return &tv @@ -273,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) } return out, nil } - -// Comparison is the return value of a TypedValue.Compare() operation. -// -// No field will appear in more than one of the three fieldsets. If all of the -// fieldsets are empty, then the objects must have been equal. -type Comparison struct { - // Removed contains any fields removed by rhs (the right-hand-side - // object in the comparison). - Removed *fieldpath.Set - // Modified contains fields present in both objects but different. - Modified *fieldpath.Set - // Added contains any fields added by rhs. - Added *fieldpath.Set -} - -// IsSame returns true if the comparison returned no changes (the two -// compared objects are similar). -func (c *Comparison) IsSame() bool { - return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() -} - -// String returns a human readable version of the comparison. 
-func (c *Comparison) String() string { - bld := strings.Builder{} - if !c.Modified.Empty() { - bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) - } - if !c.Added.Empty() { - bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) - } - if !c.Removed.Empty() { - bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) - } - return bld.String() -} - -// ExcludeFields fields from the compare recursively removes the fields -// from the entire comparison -func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { - if fields == nil || fields.Empty() { - return c - } - c.Removed = c.Removed.RecursiveDifference(fields) - c.Modified = c.Modified.RecursiveDifference(fields) - c.Added = c.Added.RecursiveDifference(fields) - return c -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go deleted file mode 100644 index 1fa5d88ae..000000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package typed - -import ( - "fmt" - "strings" - - "sigs.k8s.io/structured-merge-diff/v4/schema" - "sigs.k8s.io/structured-merge-diff/v4/value" -) - -func normalizeUnions(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - for _, union := range atom.Map.Unions { - if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { - return err - } - } - return nil -} - -func normalizeUnionsApply(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - - for _, union := range atom.Map.Unions { - out := value.NewValueInterface(*w.out) - if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { - return err - } - *w.out = out.Unstructured() - } - return nil -} - -type discriminated string -type field string - -type discriminatedNames struct { - f2d map[field]discriminated - d2f map[discriminated]field -} - -func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { - d2f := map[discriminated]field{} - for key, value := range f2d { - d2f[value] = key - } - return discriminatedNames{ - f2d: f2d, - d2f: d2f, - } -} - -func (dn discriminatedNames) toField(d discriminated) field { - if 
f, ok := dn.d2f[d]; ok { - return f - } - return field(d) -} - -func (dn discriminatedNames) toDiscriminated(f field) discriminated { - if d, ok := dn.f2d[f]; ok { - return d - } - return discriminated(f) -} - -type discriminator struct { - name string -} - -func (d *discriminator) Set(m value.Map, v discriminated) { - if d == nil { - return - } - m.Set(d.name, value.NewValueInterface(string(v))) -} - -func (d *discriminator) Get(m value.Map) discriminated { - if d == nil || m == nil { - return "" - } - val, ok := m.Get(d.name) - if !ok { - return "" - } - if !val.IsString() { - return "" - } - return discriminated(val.AsString()) -} - -type fieldsSet map[field]struct{} - -// newFieldsSet returns a map of the fields that are part of the union and are set -// in the given map. -func newFieldsSet(m value.Map, fields []field) fieldsSet { - if m == nil { - return nil - } - set := fieldsSet{} - for _, f := range fields { - if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { - set.Add(f) - } - } - return set -} - -func (fs fieldsSet) Add(f field) { - if fs == nil { - fs = map[field]struct{}{} - } - fs[f] = struct{}{} -} - -func (fs fieldsSet) One() *field { - for f := range fs { - return &f - } - return nil -} - -func (fs fieldsSet) Has(f field) bool { - _, ok := fs[f] - return ok -} - -func (fs fieldsSet) List() []field { - fields := []field{} - for f := range fs { - fields = append(fields, f) - } - return fields -} - -func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { - n := fieldsSet{} - for f := range fs { - if !o.Has(f) { - n.Add(f) - } - } - return n -} - -func (fs fieldsSet) String() string { - s := []string{} - for k := range fs { - s = append(s, string(k)) - } - return strings.Join(s, ", ") -} - -type union struct { - deduceInvalidDiscriminator bool - d *discriminator - dn discriminatedNames - f []field -} - -func newUnion(su *schema.Union) *union { - u := &union{} - if su.Discriminator != nil { - u.d = &discriminator{name: *su.Discriminator} - } - f2d := map[field]discriminated{} - for _, f := range su.Fields { - u.f = append(u.f, field(f.FieldName)) - f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) - } - u.dn = newDiscriminatedName(f2d) - u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator - return u -} - -// clear removes all the fields in map that are part of the union, but -// the one we decided to keep. -func (u *union) clear(m value.Map, f field) { - for _, fieldName := range u.f { - if field(fieldName) != f { - m.Delete(string(fieldName)) - } - } -} - -func (u *union) Normalize(old, new, out value.Map) error { - os := newFieldsSet(old, u.f) - ns := newFieldsSet(new, u.f) - diff := ns.Difference(os) - - if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { - if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { - return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) - } - if len(diff) > 1 { - return fmt.Errorf("multiple new fields added: %v", diff) - } - u.clear(out, u.dn.toField(u.d.Get(new))) - return nil - } - - if len(ns) > 1 { - return fmt.Errorf("multiple fields set without discriminator change: %v", ns) - } - - // Set discriminiator if it needs to be deduced. 
- if u.deduceInvalidDiscriminator && len(ns) == 1 { - u.d.Set(out, u.dn.toDiscriminated(*ns.One())) - } - - return nil -} - -func (u *union) NormalizeApply(applied, merged, out value.Map) error { - as := newFieldsSet(applied, u.f) - if len(as) > 1 { - return fmt.Errorf("more than one field of union applied: %v", as) - } - if len(as) == 0 { - // None is set, just leave. - return nil - } - // We have exactly one, discriminiator must match if set - if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { - return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) - } - - // Update discriminiator if needed - if u.deduceInvalidDiscriminator { - u.d.Set(out, u.dn.toDiscriminated(*as.One())) - } - // Clear others fields. - u.clear(out, *as.One()) - - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go index 378d30219..652e24c81 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + v.allowDuplicates = false if v.allocator == nil { v.allocator = value.NewFreelistAllocator() } @@ -49,6 +50,9 @@ type validatingObjectWalker struct { value value.Value schema *schema.Schema typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*validatingObjectWalker @@ -102,6 +106,12 @@ func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs Valida if !v.IsBool() { return errorf("%vexpected boolean, got %v", prefix, v) } + case schema.Untyped: + if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() { + return errorf("%vexpected any scalar, got %v", prefix, v) + } + default: + return errorf("%vunexpected scalar type in schema: %v", prefix, *t) } return nil } @@ -123,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) pe.Index = &i } else { var err error - pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -131,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) // this element. return } - if observedKeys.Has(pe) { + if observedKeys.Has(pe) && !v.allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) 
} observedKeys.Insert(pe) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go index dc8b8c720..c38402b99 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go @@ -136,7 +136,7 @@ func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { if !ok { return false } - return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go index d8e208628..c3ae00b18 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go @@ -88,12 +88,12 @@ func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } @@ -168,12 +168,12 @@ func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index a5a467c0f..f0d58d42c 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -154,7 +154,9 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi if field.Type.Kind() == reflect.Ptr { e = field.Type.Elem() } - buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + if e.Kind() == reflect.Struct { + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + } continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore index e256a31e0..2dc92904e 100644 --- a/vendor/sigs.k8s.io/yaml/.gitignore +++ b/vendor/sigs.k8s.io/yaml/.gitignore @@ -6,6 +6,10 @@ .project .settings/** +# Idea files +.idea/** +.idea/ + # Emacs save files *~ diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml index d20e23eff..54ed8f9cb 100644 --- a/vendor/sigs.k8s.io/yaml/.travis.yml +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -1,8 +1,7 @@ language: go -dist: xenial -go: - - 1.12.x - - 1.13.x +arch: arm64 +dist: focal +go: 1.15.x script: - diff -u <(echo -n) <(gofmt -d *.go) - diff -u <(echo -n) <(golint $(go list -e ./...) 
| grep -v YAMLToJSON) diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE index 7805d36de..093d6d3ed 100644 --- a/vendor/sigs.k8s.io/yaml/LICENSE +++ b/vendor/sigs.k8s.io/yaml/LICENSE @@ -48,3 +48,259 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# The forked go-yaml.v3 library under this project is covered by two +different licenses (MIT and Apache): + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +# The forked go-yaml.v2 library under the project is covered by an +Apache license: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 325b40b07..003a149e1 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -2,26 +2,22 @@ approvers: - dims -- lavalamp +- jpbetz - smarterclayton - deads2k - sttts - liggitt -- caesarxuchao reviewers: - dims - thockin -- lavalamp +- jpbetz - smarterclayton - wojtek-t - deads2k - derekwaynecarr -- caesarxuchao - mikedanese - liggitt -- gmarek - sttts -- ncdc - tallclair labels: - sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md index 5a651d916..e81cc426b 100644 --- a/vendor/sigs.k8s.io/yaml/README.md +++ b/vendor/sigs.k8s.io/yaml/README.md @@ -107,8 +107,8 @@ func main() { } fmt.Println(string(y)) /* Output: - name: John age: 30 + name: John */ j2, err := yaml.YAMLToJSON(y) if err != nil { diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go index 235b7f2cf..0ea28bd03 100644 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -16,53 +16,53 @@ import ( "unicode/utf8" ) -// indirect walks down v allocating pointers as needed, +// indirect walks down 'value' allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. // if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, +func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If 'value' is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() + if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() { + value = value.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e + if value.Kind() == reflect.Interface && !value.IsNil() { + element := value.Elem() + if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) { + value = element continue } } - if v.Kind() != reflect.Ptr { + if value.Kind() != reflect.Ptr { break } - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() { break } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) + if value.IsNil() { + if value.CanSet() { + value.Set(reflect.New(value.Type().Elem())) } else { - v = reflect.New(v.Type().Elem()) + value = reflect.New(value.Type().Elem()) } } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { + if value.Type().NumMethod() > 0 { + if u, ok := value.Interface().(json.Unmarshaler); ok { return u, nil, reflect.Value{} } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + if u, ok := value.Interface().(encoding.TextUnmarshaler); ok { return nil, u, reflect.Value{} } } - v = v.Elem() + value = value.Elem() } - return nil, nil, v + return nil, nil, value } // A field represents a single field found in a struct. 
@@ -134,8 +134,8 @@ func typeFields(t reflect.Type) []field { next := []field{{typ: t}} // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} + var count map[reflect.Type]int + var nextCount map[reflect.Type]int // Types already visited at an earlier level. visited := map[reflect.Type]bool{} @@ -348,8 +348,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See http://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and @@ -420,10 +421,8 @@ func equalFoldRight(s, t []byte) bool { t = t[size:] } - if len(t) > 0 { - return false - } - return true + + return len(t) <= 0 } // asciiEqualFold is a specialization of bytes.EqualFold for use when diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS new file mode 100644 index 000000000..73be0a3a9 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS @@ -0,0 +1,24 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +- natasha41575 +- knverey +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md new file mode 100644 index 000000000..53f4139dc --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md @@ -0,0 +1,143 @@ +# go-yaml fork + +This package is a fork of the go-yaml library and is intended solely for consumption +by kubernetes projects. In this fork, we plan to support only critical changes required for +kubernetes, such as small bug fixes and regressions. 
Larger, general-purpose feature requests +should be made in the upstream go-yaml library, and we will reject such changes in this fork +unless we are pulling them from upstream. + +This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 + +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go new file mode 100644 index 000000000..acf71402c --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. 
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. 
+func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. 
+// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. 
+// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go new file mode 100644 index 000000000..129bc2a97 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go new file mode 100644 index 000000000..a1c2cc526 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
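// Aside (illustrative only, not part of the vendored file): the lookahead in
// yaml_emitter_need_more_events above is what later lets the emitter detect
// empty collections and simple keys before committing to a style. A minimal
// sketch of the same level-counting rule, reusing types and constants defined
// in this package; the helper name is hypothetical.
func collectionClosedInQueue(events []yaml_event_t, head int) bool {
	level := 0
	for i := head; i < len(events); i++ {
		switch events[i].typ {
		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT,
			yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
			level++
		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT,
			yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
			level--
		}
		if level == 0 {
			// The collection opened at events[head] is fully buffered.
			return true
		}
	}
	return false
}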
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
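// Aside (illustrative only, not part of the vendored file): the scalar
// analysis above is what ultimately decides whether a value may stay in plain
// style. A minimal sketch showing how a leading space disables plain style;
// the function name is hypothetical.
func exampleLeadingSpaceForcesQuoting() bool {
	var e yaml_emitter_t
	yaml_emitter_analyze_scalar(&e, []byte(" padded"))
	// leading_space was detected, so both plain-style flags are cleared and
	// the emitter will fall back to a quoted style for this value.
	return !e.scalar_data.flow_plain_allowed && !e.scalar_data.block_plain_allowed
}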
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go new file mode 100644 index 000000000..0ee738e11 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + 
"fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go new file mode 100644 index 000000000..81d05dfe5 --- /dev/null 
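// Aside (illustrative only, not part of the vendored diff): the jsonNumber
// detection and the isBase60Float check in encode.go above shape the
// encoder's output. A minimal sketch, assuming it sits in the same yaml
// package; the function name is hypothetical and "encoding/json" would need
// to be imported.
func exampleEncodeNumbers() {
	n := json.Number("42")
	out, _ := Marshal(map[string]interface{}{"n": n})
	// Int64() succeeds, so the value is emitted as a plain int: "n: 42\n".
	_ = out

	out, _ = Marshal("1:20")
	// "1:20" matches the YAML 1.1 base-60 form, so it is emitted quoted
	// ("\"1:20\"\n") rather than as a bare scalar other parsers might misread.
	_ = out
}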
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
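Illustrative sketch (not part of the vendored sources): yaml_parser_parse_node above turns '*name' into an ALIAS event, attaches '&name' anchors and resolved tag handles to node events, and falls back to an empty scalar when only properties are present. The sketch below shows how anchors, aliases, and an explicit tag surface through the exported API, assuming this copy behaves like upstream gopkg.in/yaml.v2.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed upstream equivalent of this vendored copy
)

func main() {
	// &b defines an anchor on the mapping, *b is an alias to that node, and
	// the explicit !!float tag forces a quoted scalar to decode as a float.
	src := "base: &b\n  x: 1\ncopy: *b\npi: !!float \"3.14\"\n"

	var v map[string]interface{}
	if err := yaml.Unmarshal([]byte(src), &v); err != nil {
		panic(err)
	}
	fmt.Printf("copy: %#v\n", v["copy"]) // same content as v["base"]
	fmt.Printf("pi:   %T %v\n", v["pi"], v["pi"])
}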
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
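Illustrative sketch (not part of the vendored sources): the block and flow states above emit the same sequence/mapping event shapes regardless of syntax, and a key with no value is turned into an empty-scalar event by the helper just below. A small sketch of the visible effect, assuming upstream-compatible gopkg.in/yaml.v2 behavior:

package main

import (
	"fmt"
	"reflect"

	yaml "gopkg.in/yaml.v2" // assumed upstream equivalent of this vendored copy
)

func main() {
	// Block and flow syntax decode identically; the value-less "empty:" key
	// becomes an empty-scalar event and decodes to nil.
	block := "items:\n  - a\n  - b\nempty:\n"
	flow := "{items: [a, b], empty: }"

	var b, f map[string]interface{}
	if err := yaml.Unmarshal([]byte(block), &b); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte(flow), &f); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(b, f)) // true
	fmt.Println(b["empty"] == nil)       // true
}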
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go new file mode 100644 index 000000000..7c1f5fac3 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
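Illustrative sketch (not part of the vendored sources): yaml_parser_determine_encoding above compares the first raw bytes against the three BOM constants and falls back to UTF-8 when none match. The hypothetical helper below (detectEncoding is not part of this file) mirrors that check using only the standard library.

package main

import (
	"bytes"
	"fmt"
)

// detectEncoding is a hypothetical helper mirroring the BOM check above:
// UTF-16 BOMs are two bytes, the UTF-8 BOM is three bytes, and anything
// else is assumed to be UTF-8 without a BOM.
func detectEncoding(b []byte) string {
	switch {
	case bytes.HasPrefix(b, []byte("\xff\xfe")):
		return "UTF-16LE"
	case bytes.HasPrefix(b, []byte("\xfe\xff")):
		return "UTF-16BE"
	case bytes.HasPrefix(b, []byte("\xef\xbb\xbf")):
		return "UTF-8 (BOM)"
	default:
		return "UTF-8 (assumed)"
	}
}

func main() {
	fmt.Println(detectEncoding([]byte("\xef\xbb\xbfkey: value")))
	fmt.Println(detectEncoding([]byte("key: value")))
}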
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
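Illustrative sketch (not part of the vendored sources): the decoder above hand-rolls the RFC 3629 octet-length table and the RFC 2781 surrogate-pair formula so it can report precise reader errors and offsets. The standard-library-only sketch below exercises the same arithmetic.

package main

import (
	"fmt"
	"unicode/utf16"
	"unicode/utf8"
)

func main() {
	// UTF-8: '€' (U+20AC) is the three-byte sequence 0xE2 0x82 0xAC,
	// i.e. the 1110xxxx 10xxxxxx 10xxxxxx row of the table above.
	r, width := utf8.DecodeRune([]byte("€"))
	fmt.Printf("U+%04X width=%d\n", r, width) // U+20AC width=3

	// UTF-16: U+1F600 needs a surrogate pair; the reader applies
	// value = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF).
	w1, w2 := utf16.EncodeRune(0x1F600)
	value := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
	fmt.Printf("pair %#x %#x -> U+%X\n", w1, w2, value) // -> U+1F600
}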
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go new file mode 100644 index 000000000..4120e0c91 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go new file mode 100644 index 000000000..0b9bb6030 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
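+	// Once the whitespace and comments are consumed, the indentation is
+	// unrolled and the next indicator character selects the token fetcher.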
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
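+	// The indicator is exactly three characters ('---' or '...'), hence the
+	// three skips below.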
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
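+	// The simple_key_allowed flag doubles as a marker for the start of a
+	// block line, which is why tabs are only accepted when it is unset or
+	// the scanner is in the flow context.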
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
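+// The name must be non-empty and must be followed by a blank or a break;
+// otherwise a scanner error is reported against the start of the directive.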
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
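+	// A '%' introduces a URI-escaped octet, decoded by
+	// yaml_parser_scan_uri_escapes below.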
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
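+	// '+' keeps the trailing line breaks, '-' strips them; with no
+	// indicator the scalar is clipped to a single trailing break.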
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
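+	// Surrogate code points and values above U+10FFFF are rejected; an
+	// accepted value is encoded as one to four UTF-8 bytes below.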
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
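The folding logic above, shared by the quoted and plain scalar scanners, turns a single line break inside a scalar into a space and keeps an empty line as a real newline. A minimal standalone check of that behaviour through the public gopkg.in/yaml.v2 API, which this vendored copy mirrors; the input document and the expected output are illustrative assumptions, not taken from this change:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// A plain scalar spread over several lines: the single break after
	// "first" folds into a space, while the blank line before "third"
	// is preserved as a newline.
	src := []byte("msg: first\n  second\n\n  third\n")

	var out map[string]string
	if err := yaml.Unmarshal(src, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", out["msg"]) // expected: "first second\nthird"
}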
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go new file mode 100644 index 000000000..4c45e660a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go new file mode 100644 index 000000000..a2dde608c --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
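The sorter.go file added above gives map keys a natural ordering when the encoder sorts them: numbers sort numerically, and digit runs embedded in string keys are compared by value rather than character by character. A small sketch of the visible effect through the public gopkg.in/yaml.v2 Marshal; the keys and the expected output are illustrative assumptions:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Plain lexical ordering would place "item10" before "item2"; the key
	// sorter compares the embedded digit runs numerically instead.
	out, err := yaml.Marshal(map[string]int{"item10": 10, "item2": 2, "item1": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected:
	// item1: 1
	// item2: 2
	// item10: 10
}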
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go new file mode 100644 index 000000000..30813884c --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
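getStructInfo above is what turns `yaml:"..."` field tags into key names plus the omitempty, flow and inline flags, and the IsZeroer/isZero helpers that follow decide what omitempty actually omits. A short usage sketch against the public gopkg.in/yaml.v2 API; the types, field names and expected output are illustrative assumptions:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Labels map[string]string

type Spec struct {
	Name     string `yaml:"name"`
	Replicas int    `yaml:"replicas,omitempty"` // omitted while it is the zero value
	Ports    []int  `yaml:"ports,flow"`         // emitted in flow style: [80, 443]
	Labels   `yaml:",inline"`                   // map keys merged into the outer mapping
}

func main() {
	out, err := yaml.Marshal(Spec{
		Name:   "web",
		Ports:  []int{80, 443},
		Labels: Labels{"tier": "frontend"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// expected:
	// name: web
	// ports: [80, 443]
	// tier: frontend
}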
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go new file mode 100644 index 000000000..f6a9c8e34 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
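Among the tag constants above, yaml_MERGE_TAG backs the YAML 1.1 merge key (<<), which lets a mapping pull in the key/value pairs of an anchored mapping. A short sketch with the public gopkg.in/yaml.v2 API; the document contents and expected values are illustrative assumptions:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	src := []byte(`
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev_db
`)
	var cfg map[string]map[string]string
	if err := yaml.Unmarshal(src, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg["development"]["adapter"])  // postgres, merged from the anchor
	fmt.Println(cfg["development"]["database"]) // dev_db
}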
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+ multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
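These byte-level predicates let the scanner and emitter work directly on UTF-8 bytes without decoding runes; the width helper defined a little further below in this file recovers a sequence's byte length from its lead byte alone. A standalone cross-check of that classification against the standard library; the helper is re-declared here purely for illustration:

package main

import (
	"fmt"
	"unicode/utf8"
)

// width mirrors the lead-byte classification used by this file: the byte
// length of a UTF-8 sequence is derived from the bit pattern of its first byte.
func width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	for _, r := range []rune{'a', 'é', '€', '𝄞'} {
		buf := make([]byte, utf8.UTFMax)
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("%c: width=%d utf8.EncodeRune=%d\n", r, width(buf[0]), n)
	}
	// expected: widths 1, 2, 3 and 4, matching the standard encoder in each case
}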
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go index efbc535d4..fc10246bd 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
 package yaml
 
 import (
@@ -8,56 +24,59 @@ import (
     "reflect"
     "strconv"
 
-    "gopkg.in/yaml.v2"
+    "sigs.k8s.io/yaml/goyaml.v2"
 )
 
-// Marshal marshals the object into JSON then converts JSON to YAML and returns the
-// YAML.
-func Marshal(o interface{}) ([]byte, error) {
-    j, err := json.Marshal(o)
+// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference)
+func Marshal(obj interface{}) ([]byte, error) {
+    jsonBytes, err := json.Marshal(obj)
     if err != nil {
-        return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+        return nil, fmt.Errorf("error marshaling into JSON: %w", err)
     }
 
-    y, err := JSONToYAML(j)
-    if err != nil {
-        return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
-    }
-
-    return y, nil
+    return JSONToYAML(jsonBytes)
 }
 
 // JSONOpt is a decoding option for decoding from JSON format.
 type JSONOpt func(*json.Decoder) *json.Decoder
 
-// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
-// optionally configuring the behavior of the JSON unmarshal.
-func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
-    return yamlUnmarshal(y, o, false, opts...)
+// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj. Options for the
+// standard library json.Decoder can be optionally specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer.
+//
+// Important notes about the Unmarshal logic:
+//
+// - Decoding is case-insensitive, unlike the rest of Kubernetes API machinery, as this is using the stdlib json library. This might be confusing to users.
+// - This decodes any number (although it is an integer) into a float64 if the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}. This means integers above +/- 2^53 will lose precision when round-tripping. Make a JSONOpt that calls d.UseNumber() to avoid this.
+// - Duplicate fields, including in-case-sensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See UnmarshalStrict for an alternative.
+// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override.
+// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly.
+// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML to JSON conversion process.
+// - There are no compatibility guarantees for returned error values.
+func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error {
+    return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...)
 }
 
-// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
-// into an object, optionally configuring the behavior of the JSON unmarshal.
-func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
-    return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
+// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions:
+//
+// - Duplicate fields in an object yield an error. This is according to the YAML specification.
+// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error.
+func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error {
+    return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...)
 }
 
-// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
+// unmarshal unmarshals the given YAML byte stream into the given interface,
 // optionally performing the unmarshalling strictly
-func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
-    vo := reflect.ValueOf(o)
-    unmarshalFn := yaml.Unmarshal
-    if strict {
-        unmarshalFn = yaml.UnmarshalStrict
-    }
-    j, err := yamlToJSON(y, &vo, unmarshalFn)
+func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error {
+    jsonTarget := reflect.ValueOf(obj)
+
+    jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn)
     if err != nil {
-        return fmt.Errorf("error converting YAML to JSON: %v", err)
+        return fmt.Errorf("error converting YAML to JSON: %w", err)
     }
 
-    err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
+    err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...)
     if err != nil {
-        return fmt.Errorf("error unmarshaling JSON: %v", err)
+        return fmt.Errorf("error unmarshaling JSON: %w", err)
     }
 
     return nil
@@ -67,21 +86,26 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error
 // object, optionally applying decoder options prior to decoding. We are not
 // using json.Unmarshal directly as we want the chance to pass in non-default
 // options.
-func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
-    d := json.NewDecoder(r)
+func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error {
+    d := json.NewDecoder(reader)
     for _, opt := range opts {
         d = opt(d)
     }
-    if err := d.Decode(&o); err != nil {
+    if err := d.Decode(&obj); err != nil {
         return fmt.Errorf("while decoding JSON: %v", err)
     }
     return nil
 }
 
-// JSONToYAML Converts JSON to YAML.
+// JSONToYAML converts JSON to YAML. Notable implementation details:
+//
+// - Duplicate fields, are case-sensitively ignored in an undefined order.
+// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name.
+// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip.
 func JSONToYAML(j []byte) ([]byte, error) {
     // Convert the JSON to an object.
     var jsonObj interface{}
+
     // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
     // Go JSON library doesn't try to pick the right number type (int, float,
     // etc.) when unmarshalling to interface{}, it just picks float64
@@ -93,35 +117,46 @@ func JSONToYAML(j []byte) ([]byte, error) {
     }
 
     // Marshal this object into YAML.
-    return yaml.Marshal(jsonObj)
+    yamlBytes, err := yaml.Marshal(jsonObj)
+    if err != nil {
+        return nil, err
+    }
+
+    return yamlBytes, nil
 }
 
 // YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
 // passing JSON through this method should be a no-op.
 //
-// Things YAML can do that are not supported by JSON:
-// * In YAML you can have binary and null keys in your maps. These are invalid
-//   in JSON. (int and float keys are converted to strings.)
-// * Binary data in YAML with the !!binary tag is not supported. If you want to
-//   use binary data with this library, encode the data as base64 as usual but do
-//   not use the !!binary tag in your YAML. This will ensure the original base64
-//   encoded data makes it all the way through to the JSON.
+// Some things YAML can do that are not supported by JSON:
+// - In YAML you can have binary and null keys in your maps. These are invalid
+//   in JSON, and therefore int, bool and float keys are converted to strings implicitly.
+// - Binary data in YAML with the !!binary tag is not supported. If you want to
+//   use binary data with this library, encode the data as base64 as usual but do
+//   not use the !!binary tag in your YAML. This will ensure the original base64
+//   encoded data makes it all the way through to the JSON.
+// - And more... read the YAML specification for more details.
+//
+// Notable about the implementation:
 //
-// For strict decoding of YAML, use YAMLToJSONStrict.
+// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See YAMLToJSONStrict for an alternative.
+// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly.
+// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip.
+// - There are no compatibility guarantees for returned error values.
 func YAMLToJSON(y []byte) ([]byte, error) {
-    return yamlToJSON(y, nil, yaml.Unmarshal)
+    return yamlToJSONTarget(y, nil, yaml.Unmarshal)
 }
 
 // YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
 // returning an error on any duplicate field names.
 func YAMLToJSONStrict(y []byte) ([]byte, error) {
-    return yamlToJSON(y, nil, yaml.UnmarshalStrict)
+    return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict)
 }
 
-func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
+func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) {
     // Convert the YAML to an object.
     var yamlObj interface{}
-    err := yamlUnmarshal(y, &yamlObj)
+    err := unmarshalFn(yamlBytes, &yamlObj)
     if err != nil {
         return nil, err
     }
@@ -136,7 +171,11 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte,
     }
 
     // Convert this object to JSON and return the data.
-    return json.Marshal(jsonObj)
+    jsonBytes, err := json.Marshal(jsonObj)
+    if err != nil {
+        return nil, err
+    }
+    return jsonBytes, nil
 }
 
 func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
@@ -147,13 +186,13 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
     // decoding into the value, we're just checking if the ultimate target is a
     // string.
     if jsonTarget != nil {
-        ju, tu, pv := indirect(*jsonTarget, false)
+        jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false)
         // We have a JSON or Text Umarshaler at this level, so we can't be trying
         // to decode into a string.
-        if ju != nil || tu != nil {
+        if jsonUnmarshaler != nil || textUnmarshaler != nil {
             jsonTarget = nil
         } else {
-            jsonTarget = &pv
+            jsonTarget = &pointerValue
         }
     }
 
@@ -205,7 +244,7 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
                 keyString = "false"
             }
         default:
-            return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+            return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v",
                 reflect.TypeOf(k), k, v)
         }
 
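Aside for reviewers, not part of the vendored patch: the doc comments added above describe decoding behavior that callers of sigs.k8s.io/yaml should be aware of. A minimal illustrative sketch against the updated API follows; the useNumber helper is a hypothetical JSONOpt written only for this example, not something the library provides.

package main

import (
    "encoding/json"
    "fmt"

    "sigs.k8s.io/yaml"
)

// useNumber is a hypothetical JSONOpt for this example: it tells the JSON
// decoder to keep numbers as json.Number instead of float64.
func useNumber(d *json.Decoder) *json.Decoder {
    d.UseNumber()
    return d
}

func main() {
    // Untyped decoding with the option: 2^53+1 survives the YAML->JSON round trip.
    var untyped map[string]interface{}
    if err := yaml.Unmarshal([]byte("id: 9007199254740993"), &untyped, useNumber); err != nil {
        panic(err)
    }
    fmt.Printf("%T %v\n", untyped["id"], untyped["id"]) // json.Number 9007199254740993

    // Strict decoding: duplicate keys (and unknown struct fields) yield an error.
    type config struct {
        Name string `json:"name"`
    }
    var c config
    err := yaml.UnmarshalStrict([]byte("name: a\nname: b\n"), &c)
    fmt.Println(err != nil) // true
}

Without the UseNumber option the large integer would be decoded into a float64, which is the precision loss the Unmarshal comment above warns about.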
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
index ab3e06a22..94abc1719 100644
--- a/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go
@@ -1,7 +1,24 @@
 // This file contains changes that are only compatible with go 1.10 and onwards.
 
+//go:build go1.10
 // +build go1.10
 
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package yaml
 
 import "encoding/json"